1 // BEGIN  Generated code -- do not edit
   2 // Generated by aarch64-asmtest.py
   3     Label back, forth;
   4     __ bind(back);
   5 
   6 // ArithOp
   7     __ add(r26, r23, r13, Assembler::LSL, 32);         //       add     x26, x23, x13, LSL #32
   8     __ sub(r12, r24, r9, Assembler::LSR, 37);          //       sub     x12, x24, x9, LSR #37
   9     __ adds(r28, r15, r8, Assembler::ASR, 39);         //       adds    x28, x15, x8, ASR #39
  10     __ subs(r7, r28, r30, Assembler::ASR, 57);         //       subs    x7, x28, x30, ASR #57
  11     __ addw(r9, r22, r27, Assembler::ASR, 15);         //       add     w9, w22, w27, ASR #15
  12     __ subw(r3, r13, r17, Assembler::ASR, 30);         //       sub     w3, w13, w17, ASR #30
  13     __ addsw(r14, r26, r8, Assembler::ASR, 17);        //       adds    w14, w26, w8, ASR #17
  14     __ subsw(r0, r22, r12, Assembler::ASR, 21);        //       subs    w0, w22, w12, ASR #21
  15     __ andr(r0, r15, r26, Assembler::LSL, 20);         //       and     x0, x15, x26, LSL #20
  16     __ orr(r26, r5, r17, Assembler::LSL, 61);          //       orr     x26, x5, x17, LSL #61
  17     __ eor(r24, r13, r2, Assembler::LSL, 32);          //       eor     x24, x13, x2, LSL #32
  18     __ ands(r28, r3, r17, Assembler::ASR, 35);         //       ands    x28, x3, x17, ASR #35
  19     __ andw(r25, r16, r29, Assembler::LSR, 18);        //       and     w25, w16, w29, LSR #18
  20     __ orrw(r13, r17, r11, Assembler::LSR, 9);         //       orr     w13, w17, w11, LSR #9
  21     __ eorw(r5, r5, r17, Assembler::LSR, 15);          //       eor     w5, w5, w17, LSR #15
  22     __ andsw(r2, r23, r27, Assembler::ASR, 26);        //       ands    w2, w23, w27, ASR #26
  23     __ bic(r27, r28, r16, Assembler::LSR, 45);         //       bic     x27, x28, x16, LSR #45
  24     __ orn(r8, r25, r26, Assembler::ASR, 37);          //       orn     x8, x25, x26, ASR #37
  25     __ eon(r29, r17, r13, Assembler::LSR, 63);         //       eon     x29, x17, x13, LSR #63
  26     __ bics(r28, r24, r2, Assembler::LSR, 31);         //       bics    x28, x24, x2, LSR #31
  27     __ bicw(r19, r26, r7, Assembler::ASR, 3);          //       bic     w19, w26, w7, ASR #3
  28     __ ornw(r6, r24, r10, Assembler::ASR, 3);          //       orn     w6, w24, w10, ASR #3
  29     __ eonw(r4, r21, r1, Assembler::LSR, 29);          //       eon     w4, w21, w1, LSR #29
  30     __ bicsw(r16, r21, r0, Assembler::LSR, 19);        //       bics    w16, w21, w0, LSR #19
  31 
  32 // AddSubImmOp
  33     __ addw(r17, r12, 379u);                           //       add     w17, w12, #379
  34     __ addsw(r30, r1, 22u);                            //       adds    w30, w1, #22
  35     __ subw(r29, r5, 126u);                            //       sub     w29, w5, #126
  36     __ subsw(r6, r24, 960u);                           //       subs    w6, w24, #960
  37     __ add(r0, r13, 104u);                             //       add     x0, x13, #104
  38     __ adds(r8, r6, 663u);                             //       adds    x8, x6, #663
  39     __ sub(r10, r5, 516u);                             //       sub     x10, x5, #516
  40     __ subs(r1, r3, 1012u);                            //       subs    x1, x3, #1012
  41 
  42 // LogicalImmOp
  43     __ andw(r6, r11, 4294049777ull);                   //       and     w6, w11, #0xfff1fff1
  44     __ orrw(r28, r5, 4294966791ull);                   //       orr     w28, w5, #0xfffffe07
  45     __ eorw(r1, r20, 134217216ull);                    //       eor     w1, w20, #0x7fffe00
  46     __ andsw(r7, r17, 1048576ull);                     //       ands    w7, w17, #0x100000
  47     __ andr(r14, r12, 9223372036854775808ull);         //       and     x14, x12, #0x8000000000000000
  48     __ orr(r9, r11, 562675075514368ull);               //       orr     x9, x11, #0x1ffc000000000
  49     __ eor(r17, r0, 18014398509481728ull);             //       eor     x17, x0, #0x3fffffffffff00
  50     __ ands(r1, r8, 18446744073705357315ull);          //       ands    x1, x8, #0xffffffffffc00003
  51 
  52 // AbsOp
  53     __ b(__ pc());                                     //       b       .
  54     __ b(back);                                        //       b       back
  55     __ b(forth);                                       //       b       forth
  56     __ bl(__ pc());                                    //       bl      .
  57     __ bl(back);                                       //       bl      back
  58     __ bl(forth);                                      //       bl      forth
  59 
  60 // RegAndAbsOp
  61     __ cbzw(r10, __ pc());                             //       cbz     w10, .
  62     __ cbzw(r10, back);                                //       cbz     w10, back
  63     __ cbzw(r10, forth);                               //       cbz     w10, forth
  64     __ cbnzw(r8, __ pc());                             //       cbnz    w8, .
  65     __ cbnzw(r8, back);                                //       cbnz    w8, back
  66     __ cbnzw(r8, forth);                               //       cbnz    w8, forth
  67     __ cbz(r11, __ pc());                              //       cbz     x11, .
  68     __ cbz(r11, back);                                 //       cbz     x11, back
  69     __ cbz(r11, forth);                                //       cbz     x11, forth
  70     __ cbnz(r29, __ pc());                             //       cbnz    x29, .
  71     __ cbnz(r29, back);                                //       cbnz    x29, back
  72     __ cbnz(r29, forth);                               //       cbnz    x29, forth
  73     __ adr(r19, __ pc());                              //       adr     x19, .
  74     __ adr(r19, back);                                 //       adr     x19, back
  75     __ adr(r19, forth);                                //       adr     x19, forth
  76     __ _adrp(r19, __ pc());                            //       adrp    x19, .
  77 
  78 // RegImmAbsOp
  79     __ tbz(r22, 6, __ pc());                           //       tbz     x22, #6, .
  80     __ tbz(r22, 6, back);                              //       tbz     x22, #6, back
  81     __ tbz(r22, 6, forth);                             //       tbz     x22, #6, forth
  82     __ tbnz(r12, 11, __ pc());                         //       tbnz    x12, #11, .
  83     __ tbnz(r12, 11, back);                            //       tbnz    x12, #11, back
  84     __ tbnz(r12, 11, forth);                           //       tbnz    x12, #11, forth
  85 
  86 // MoveWideImmOp
  87     __ movnw(r0, 6301, 0);                             //       movn    w0, #6301, lsl 0
  88     __ movzw(r7, 20886, 0);                            //       movz    w7, #20886, lsl 0
  89     __ movkw(r27, 18617, 0);                           //       movk    w27, #18617, lsl 0
  90     __ movn(r12, 22998, 16);                           //       movn    x12, #22998, lsl 16
  91     __ movz(r20, 1532, 16);                            //       movz    x20, #1532, lsl 16
  92     __ movk(r8, 5167, 32);                             //       movk    x8, #5167, lsl 32
  93 
  94 // BitfieldOp
  95     __ sbfm(r15, r17, 24, 28);                         //       sbfm    x15, x17, #24, #28
  96     __ bfmw(r15, r9, 14, 25);                          //       bfm     w15, w9, #14, #25
  97     __ ubfmw(r27, r25, 6, 31);                         //       ubfm    w27, w25, #6, #31
  98     __ sbfm(r19, r2, 23, 31);                          //       sbfm    x19, x2, #23, #31
  99     __ bfm(r12, r21, 10, 6);                           //       bfm     x12, x21, #10, #6
 100     __ ubfm(r22, r0, 26, 16);                          //       ubfm    x22, x0, #26, #16
 101 
 102 // ExtractOp
 103     __ extrw(r3, r3, r20, 27);                         //       extr    w3, w3, w20, #27
 104     __ extr(r8, r30, r3, 54);                          //       extr    x8, x30, x3, #54
 105 
 106 // CondBranchOp
 107     __ br(Assembler::EQ, __ pc());                     //       b.EQ    .
 108     __ br(Assembler::EQ, back);                        //       b.EQ    back
 109     __ br(Assembler::EQ, forth);                       //       b.EQ    forth
 110     __ br(Assembler::NE, __ pc());                     //       b.NE    .
 111     __ br(Assembler::NE, back);                        //       b.NE    back
 112     __ br(Assembler::NE, forth);                       //       b.NE    forth
 113     __ br(Assembler::HS, __ pc());                     //       b.HS    .
 114     __ br(Assembler::HS, back);                        //       b.HS    back
 115     __ br(Assembler::HS, forth);                       //       b.HS    forth
 116     __ br(Assembler::CS, __ pc());                     //       b.CS    .
 117     __ br(Assembler::CS, back);                        //       b.CS    back
 118     __ br(Assembler::CS, forth);                       //       b.CS    forth
 119     __ br(Assembler::LO, __ pc());                     //       b.LO    .
 120     __ br(Assembler::LO, back);                        //       b.LO    back
 121     __ br(Assembler::LO, forth);                       //       b.LO    forth
 122     __ br(Assembler::CC, __ pc());                     //       b.CC    .
 123     __ br(Assembler::CC, back);                        //       b.CC    back
 124     __ br(Assembler::CC, forth);                       //       b.CC    forth
 125     __ br(Assembler::MI, __ pc());                     //       b.MI    .
 126     __ br(Assembler::MI, back);                        //       b.MI    back
 127     __ br(Assembler::MI, forth);                       //       b.MI    forth
 128     __ br(Assembler::PL, __ pc());                     //       b.PL    .
 129     __ br(Assembler::PL, back);                        //       b.PL    back
 130     __ br(Assembler::PL, forth);                       //       b.PL    forth
 131     __ br(Assembler::VS, __ pc());                     //       b.VS    .
 132     __ br(Assembler::VS, back);                        //       b.VS    back
 133     __ br(Assembler::VS, forth);                       //       b.VS    forth
 134     __ br(Assembler::VC, __ pc());                     //       b.VC    .
 135     __ br(Assembler::VC, back);                        //       b.VC    back
 136     __ br(Assembler::VC, forth);                       //       b.VC    forth
 137     __ br(Assembler::HI, __ pc());                     //       b.HI    .
 138     __ br(Assembler::HI, back);                        //       b.HI    back
 139     __ br(Assembler::HI, forth);                       //       b.HI    forth
 140     __ br(Assembler::LS, __ pc());                     //       b.LS    .
 141     __ br(Assembler::LS, back);                        //       b.LS    back
 142     __ br(Assembler::LS, forth);                       //       b.LS    forth
 143     __ br(Assembler::GE, __ pc());                     //       b.GE    .
 144     __ br(Assembler::GE, back);                        //       b.GE    back
 145     __ br(Assembler::GE, forth);                       //       b.GE    forth
 146     __ br(Assembler::LT, __ pc());                     //       b.LT    .
 147     __ br(Assembler::LT, back);                        //       b.LT    back
 148     __ br(Assembler::LT, forth);                       //       b.LT    forth
 149     __ br(Assembler::GT, __ pc());                     //       b.GT    .
 150     __ br(Assembler::GT, back);                        //       b.GT    back
 151     __ br(Assembler::GT, forth);                       //       b.GT    forth
 152     __ br(Assembler::LE, __ pc());                     //       b.LE    .
 153     __ br(Assembler::LE, back);                        //       b.LE    back
 154     __ br(Assembler::LE, forth);                       //       b.LE    forth
 155     __ br(Assembler::AL, __ pc());                     //       b.AL    .
 156     __ br(Assembler::AL, back);                        //       b.AL    back
 157     __ br(Assembler::AL, forth);                       //       b.AL    forth
 158     __ br(Assembler::NV, __ pc());                     //       b.NV    .
 159     __ br(Assembler::NV, back);                        //       b.NV    back
 160     __ br(Assembler::NV, forth);                       //       b.NV    forth
 161 
 162 // ImmOp
 163     __ svc(12999);                                     //       svc     #12999
 164     __ hvc(2665);                                      //       hvc     #2665
 165     __ smc(9002);                                      //       smc     #9002
 166     __ brk(14843);                                     //       brk     #14843
 167     __ hlt(25964);                                     //       hlt     #25964
 168 
 169 // Op
 170     __ nop();                                          //       nop
 171     __ yield();                                        //       yield
 172     __ wfe();                                          //       wfe
 173     __ sev();                                          //       sev
 174     __ sevl();                                         //       sevl
 175     __ autia1716();                                    //       autia1716
 176     __ autiasp();                                      //       autiasp
 177     __ autiaz();                                       //       autiaz
 178     __ autib1716();                                    //       autib1716
 179     __ autibsp();                                      //       autibsp
 180     __ autibz();                                       //       autibz
 181     __ pacia1716();                                    //       pacia1716
 182     __ paciasp();                                      //       paciasp
 183     __ paciaz();                                       //       paciaz
 184     __ pacib1716();                                    //       pacib1716
 185     __ pacibsp();                                      //       pacibsp
 186     __ pacibz();                                       //       pacibz
 187     __ eret();                                         //       eret
 188     __ drps();                                         //       drps
 189     __ isb();                                          //       isb
 190     __ sb();                                           //       sb
 191 
 192 // PostfixExceptionOp
 193     __ wfi();                                          //       wfi
 194     __ xpaclri();                                      //       xpaclri
 195 
 196 // SystemOp
 197     __ dsb(Assembler::ST);                             //       dsb     ST
 198     __ dmb(Assembler::OSHST);                          //       dmb     OSHST
 199 
 200 // OneRegOp
 201     __ br(r16);                                        //       br      x16
 202     __ blr(r20);                                       //       blr     x20
 203     __ paciza(r10);                                    //       paciza  x10
 204     __ pacizb(r27);                                    //       pacizb  x27
 205     __ pacdza(r8);                                     //       pacdza  x8
 206     __ pacdzb(r0);                                     //       pacdzb  x0
 207     __ autiza(r1);                                     //       autiza  x1
 208     __ autizb(r21);                                    //       autizb  x21
 209     __ autdza(r17);                                    //       autdza  x17
 210     __ autdzb(r29);                                    //       autdzb  x29
 211     __ xpacd(r29);                                     //       xpacd   x29
 212     __ braaz(r28);                                     //       braaz   x28
 213     __ brabz(r1);                                      //       brabz   x1
 214     __ blraaz(r23);                                    //       blraaz  x23
 215     __ blrabz(r21);                                    //       blrabz  x21
 216 
 217 // SystemOneRegOp
 218     __ msr(3, 4, 4, 1, r20);                           //       msr     fpsr, x20
 219 
 220 // SystemOneRegOp
 221     __ msr(3, 4, 2, 0, r22);                           //       msr     nzcv, x22
 222 
 223 // OneRegSystemOp
 224     __ mrs(3, 4, 4, 1, r27);                           //       mrs     x27, fpsr
 225 
 226 // OneRegSystemOp
 227     __ mrs(3, 4, 2, 0, r19);                           //       mrs     x19, nzcv
 228 
 229 // OneRegSystemOp
 230     __ mrs(3, 0, 0, 7, r11);                           //       mrs     x11, dczid_el0
 231 
 232 // OneRegSystemOp
 233     __ mrs(3, 0, 0, 1, r16);                           //       mrs     x16, ctr_el0
 234 
 235 // PostfixExceptionOneRegOp
 236     __ xpaci(r6);                                      //       xpaci   x6
 237 
 238 // LoadStoreExclusiveOp
 239     __ stxr(r17, r0, r4);                              //       stxr    w17, x0, [x4]
 240     __ stlxr(r10, r24, r22);                           //       stlxr   w10, x24, [x22]
 241     __ ldxr(r10, r19);                                 //       ldxr    x10, [x19]
 242     __ ldaxr(r1, r5);                                  //       ldaxr   x1, [x5]
 243     __ stlr(r30, r8);                                  //       stlr    x30, [x8]
 244     __ ldar(r12, r17);                                 //       ldar    x12, [x17]
 245 
 246 // LoadStoreExclusiveOp
 247     __ stxrw(r9, r14, r7);                             //       stxr    w9, w14, [x7]
 248     __ stlxrw(r1, r5, r16);                            //       stlxr   w1, w5, [x16]
 249     __ ldxrw(r2, r12);                                 //       ldxr    w2, [x12]
 250     __ ldaxrw(r10, r12);                               //       ldaxr   w10, [x12]
 251     __ stlrw(r3, r28);                                 //       stlr    w3, [x28]
 252     __ ldarw(r14, r26);                                //       ldar    w14, [x26]
 253 
 254 // LoadStoreExclusiveOp
 255     __ stxrh(r30, r10, r14);                           //       stxrh   w30, w10, [x14]
 256     __ stlxrh(r21, r13, r9);                           //       stlxrh  w21, w13, [x9]
 257     __ ldxrh(r22, r27);                                //       ldxrh   w22, [x27]
 258     __ ldaxrh(r28, r19);                               //       ldaxrh  w28, [x19]
 259     __ stlrh(r11, r30);                                //       stlrh   w11, [x30]
 260     __ ldarh(r19, r2);                                 //       ldarh   w19, [x2]
 261 
 262 // LoadStoreExclusiveOp
 263     __ stxrb(r2, r23, r1);                             //       stxrb   w2, w23, [x1]
 264     __ stlxrb(r0, r12, r16);                           //       stlxrb  w0, w12, [x16]
 265     __ ldxrb(r13, r15);                                //       ldxrb   w13, [x15]
 266     __ ldaxrb(r17, r21);                               //       ldaxrb  w17, [x21]
 267     __ stlrb(r13, r11);                                //       stlrb   w13, [x11]
 268     __ ldarb(r30, r8);                                 //       ldarb   w30, [x8]
 269 
 270 // LoadStoreExclusiveOp
 271     __ ldxp(r24, r13, r11);                            //       ldxp    x24, x13, [x11]
 272     __ ldaxp(r1, r26, r21);                            //       ldaxp   x1, x26, [x21]
 273     __ stxp(r27, r13, r20, r3);                        //       stxp    w27, x13, x20, [x3]
 274     __ stlxp(r12, r6, r1, r29);                        //       stlxp   w12, x6, x1, [x29]
 275 
 276 // LoadStoreExclusiveOp
 277     __ ldxpw(r6, r4, r11);                             //       ldxp    w6, w4, [x11]
 278     __ ldaxpw(r16, r4, r30);                           //       ldaxp   w16, w4, [x30]
 279     __ stxpw(r30, r4, r12, r21);                       //       stxp    w30, w4, w12, [x21]
 280     __ stlxpw(r27, r15, r28, r9);                      //       stlxp   w27, w15, w28, [x9]
 281 
 282 // base_plus_unscaled_offset
 283 // LoadStoreOp
 284     __ str(r25, Address(r15, 1));                      //       str     x25, [x15, 1]
 285     __ strw(r2, Address(r1, -79));                     //       str     w2, [x1, -79]
 286     __ strb(r20, Address(r26, -22));                   //       strb    w20, [x26, -22]
 287     __ strh(r23, Address(r30, 22));                    //       strh    w23, [x30, 22]
 288     __ ldr(r26, Address(r28, -49));                    //       ldr     x26, [x28, -49]
 289     __ ldrw(r9, Address(r24, -128));                   //       ldr     w9, [x24, -128]
 290     __ ldrb(r12, Address(r12, -30));                   //       ldrb    w12, [x12, -30]
 291     __ ldrh(r1, Address(r15, 5));                      //       ldrh    w1, [x15, 5]
 292     __ ldrsb(r24, Address(r14, -31));                  //       ldrsb   x24, [x14, -31]
 293     __ ldrsh(r24, Address(r15, -6));                   //       ldrsh   x24, [x15, -6]
 294     __ ldrshw(r5, Address(r3, 12));                    //       ldrsh   w5, [x3, 12]
 295     __ ldrsw(r27, Address(r24, 17));                   //       ldrsw   x27, [x24, 17]
 296     __ ldrd(v13, Address(r29, -35));                   //       ldr     d13, [x29, -35]
 297     __ ldrs(v23, Address(r9, -47));                    //       ldr     s23, [x9, -47]
 298     __ strd(v11, Address(r0, 9));                      //       str     d11, [x0, 9]
 299     __ strs(v21, Address(r0, -127));                   //       str     s21, [x0, -127]
 300 
 301 // pre
 302 // LoadStoreOp
 303     __ str(r29, Address(__ pre(r3, -114)));            //       str     x29, [x3, -114]!
 304     __ strw(r17, Address(__ pre(r4, -72)));            //       str     w17, [x4, -72]!
 305     __ strb(r0, Address(__ pre(r2, -17)));             //       strb    w0, [x2, -17]!
 306     __ strh(r29, Address(__ pre(r1, 7)));              //       strh    w29, [x1, 7]!
 307     __ ldr(r16, Address(__ pre(r21, -133)));           //       ldr     x16, [x21, -133]!
 308     __ ldrw(r20, Address(__ pre(r14, 19)));            //       ldr     w20, [x14, 19]!
 309     __ ldrb(r22, Address(__ pre(r14, -3)));            //       ldrb    w22, [x14, -3]!
 310     __ ldrh(r15, Address(__ pre(r17, 9)));             //       ldrh    w15, [x17, 9]!
 311     __ ldrsb(r10, Address(__ pre(r15, -19)));          //       ldrsb   x10, [x15, -19]!
 312     __ ldrsh(r20, Address(__ pre(r12, -25)));          //       ldrsh   x20, [x12, -25]!
 313     __ ldrshw(r21, Address(__ pre(r10, -29)));         //       ldrsh   w21, [x10, -29]!
 314     __ ldrsw(r19, Address(__ pre(r0, 5)));             //       ldrsw   x19, [x0, 5]!
 315     __ ldrd(v0, Address(__ pre(r14, -54)));            //       ldr     d0, [x14, -54]!
 316     __ ldrs(v3, Address(__ pre(r1, 40)));              //       ldr     s3, [x1, 40]!
 317     __ strd(v4, Address(__ pre(r14, -94)));            //       str     d4, [x14, -94]!
 318     __ strs(v18, Address(__ pre(r28, -54)));           //       str     s18, [x28, -54]!
 319 
 320 // post
 321 // LoadStoreOp
 322     __ str(r22, Address(__ post(r15, -185)));          //       str     x22, [x15], -185
 323     __ strw(r17, Address(__ post(r14, -7)));           //       str     w17, [x14], -7
 324     __ strb(r30, Address(__ post(r11, -25)));          //       strb    w30, [x11], -25
 325     __ strh(r1, Address(__ post(r11, 20)));            //       strh    w1, [x11], 20
 326     __ ldr(r22, Address(__ post(r1, 2)));              //       ldr     x22, [x1], 2
 327     __ ldrw(r2, Address(__ post(r23, -119)));          //       ldr     w2, [x23], -119
 328     __ ldrb(r3, Address(__ post(r27, -12)));           //       ldrb    w3, [x27], -12
 329     __ ldrh(r16, Address(__ post(r7, -37)));           //       ldrh    w16, [x7], -37
 330     __ ldrsb(r15, Address(__ post(r26, 3)));           //       ldrsb   x15, [x26], 3
 331     __ ldrsh(r7, Address(__ post(r15, -30)));          //       ldrsh   x7, [x15], -30
 332     __ ldrshw(r3, Address(__ post(r11, -48)));         //       ldrsh   w3, [x11], -48
 333     __ ldrsw(r25, Address(__ post(r23, 22)));          //       ldrsw   x25, [x23], 22
 334     __ ldrd(v0, Address(__ post(r10, -215)));          //       ldr     d0, [x10], -215
 335     __ ldrs(v19, Address(__ post(r6, 55)));            //       ldr     s19, [x6], 55
 336     __ strd(v14, Address(__ post(r21, -234)));         //       str     d14, [x21], -234
 337     __ strs(v0, Address(__ post(r22, -70)));           //       str     s0, [x22], -70
 338 
 339 // base_plus_reg
 340 // LoadStoreOp
 341     __ str(r27, Address(r19, r0, Address::sxtx(0)));   //       str     x27, [x19, x0, sxtx #0]
 342     __ strw(r8, Address(r6, r13, Address::lsl(0)));    //       str     w8, [x6, x13, lsl #0]
 343     __ strb(r4, Address(r16, r22, Address::lsl(0)));   //       strb    w4, [x16, x22, lsl #0]
 344     __ strh(r25, Address(r26, r15, Address::uxtw(0))); //       strh    w25, [x26, w15, uxtw #0]
 345     __ ldr(r4, Address(r5, r24, Address::sxtw(0)));    //       ldr     x4, [x5, w24, sxtw #0]
 346     __ ldrw(r4, Address(r17, r7, Address::uxtw(0)));   //       ldr     w4, [x17, w7, uxtw #0]
 347     __ ldrb(r17, Address(r7, r11, Address::lsl(0)));   //       ldrb    w17, [x7, x11, lsl #0]
 348     __ ldrh(r0, Address(r30, r23, Address::lsl(0)));   //       ldrh    w0, [x30, x23, lsl #0]
 349     __ ldrsb(r10, Address(r22, r1, Address::uxtw(0))); //       ldrsb   x10, [x22, w1, uxtw #0]
 350     __ ldrsh(r21, Address(r30, r30, Address::sxtw(1))); //      ldrsh   x21, [x30, w30, sxtw #1]
 351     __ ldrshw(r11, Address(r10, r28, Address::sxtw(1))); //     ldrsh   w11, [x10, w28, sxtw #1]
 352     __ ldrsw(r28, Address(r19, r10, Address::uxtw(0))); //      ldrsw   x28, [x19, w10, uxtw #0]
 353     __ ldrd(v30, Address(r29, r14, Address::sxtw(0))); //       ldr     d30, [x29, w14, sxtw #0]
 354     __ ldrs(v8, Address(r5, r5, Address::sxtw(2)));    //       ldr     s8, [x5, w5, sxtw #2]
 355     __ strd(v25, Address(r8, r13, Address::sxtx(0)));  //       str     d25, [x8, x13, sxtx #0]
 356     __ strs(v17, Address(r24, r26, Address::lsl(2)));  //       str     s17, [x24, x26, lsl #2]
 357 
 358 // base_plus_scaled_offset
 359 // LoadStoreOp
 360     __ str(r19, Address(r12, 15904));                  //       str     x19, [x12, 15904]
 361     __ strw(r23, Address(r15, 7892));                  //       str     w23, [x15, 7892]
 362     __ strb(r29, Address(r13, 1970));                  //       strb    w29, [x13, 1970]
 363     __ strh(r11, Address(r7, 3094));                   //       strh    w11, [x7, 3094]
 364     __ ldr(r10, Address(r24, 14992));                  //       ldr     x10, [x24, 14992]
 365     __ ldrw(r16, Address(r0, 6160));                   //       ldr     w16, [x0, 6160]
 366     __ ldrb(r20, Address(r1, 2032));                   //       ldrb    w20, [x1, 2032]
 367     __ ldrh(r1, Address(r17, 4056));                   //       ldrh    w1, [x17, 4056]
 368     __ ldrsb(r17, Address(r25, 1889));                 //       ldrsb   x17, [x25, 1889]
 369     __ ldrsh(r27, Address(r25, 3964));                 //       ldrsh   x27, [x25, 3964]
 370     __ ldrshw(r14, Address(r17, 3724));                //       ldrsh   w14, [x17, 3724]
 371     __ ldrsw(r10, Address(r7, 6372));                  //       ldrsw   x10, [x7, 6372]
 372     __ ldrd(v3, Address(r25, 12392));                  //       ldr     d3, [x25, 12392]
 373     __ ldrs(v12, Address(r9, 7840));                   //       ldr     s12, [x9, 7840]
 374     __ strd(v24, Address(r1, 12728));                  //       str     d24, [x1, 12728]
 375     __ strs(v3, Address(r20, 6924));                   //       str     s3, [x20, 6924]
 376 
 377 // pcrel
 378 // LoadStoreOp
 379     __ ldr(r2, back);                                  //       ldr     x2, back
 380     __ ldrw(r29, __ pc());                             //       ldr     w29, .
 381 
 382 // LoadStoreOp
 383     __ prfm(Address(r14, 93));                         //       prfm    PLDL1KEEP, [x14, 93]
 384 
 385 // LoadStoreOp
 386     __ prfm(back);                                     //       prfm    PLDL1KEEP, back
 387 
 388 // LoadStoreOp
 389     __ prfm(Address(r1, r7, Address::lsl(3)));         //       prfm    PLDL1KEEP, [x1, x7, lsl #3]
 390 
 391 // LoadStoreOp
 392     __ prfm(Address(r17, 12288));                      //       prfm    PLDL1KEEP, [x17, 12288]
 393 
 394 // AddSubCarryOp
 395     __ adcw(r1, r24, r3);                              //       adc     w1, w24, w3
 396     __ adcsw(r17, r24, r20);                           //       adcs    w17, w24, w20
 397     __ sbcw(r11, r0, r13);                             //       sbc     w11, w0, w13
 398     __ sbcsw(r28, r10, r7);                            //       sbcs    w28, w10, w7
 399     __ adc(r4, r15, r16);                              //       adc     x4, x15, x16
 400     __ adcs(r2, r12, r20);                             //       adcs    x2, x12, x20
 401     __ sbc(r29, r13, r13);                             //       sbc     x29, x13, x13
 402     __ sbcs(r14, r6, r12);                             //       sbcs    x14, x6, x12
 403 
 404 // AddSubExtendedOp
 405     __ addw(r20, r12, r17, ext::sxtx, 4);              //       add     w20, w12, w17, sxtx #4
 406     __ addsw(r27, r11, r0, ext::uxtx, 3);              //       adds    w27, w11, w0, uxtx #3
 407     __ sub(r7, r1, r9, ext::sxtx, 4);                  //       sub     x7, x1, x9, sxtx #4
 408     __ subsw(r3, r27, r1, ext::uxtb, 3);               //       subs    w3, w27, w1, uxtb #3
 409     __ add(r13, r26, r12, ext::sxth, 4);               //       add     x13, x26, x12, sxth #4
 410     __ adds(r17, r5, r10, ext::sxtb, 2);               //       adds    x17, x5, x10, sxtb #2
 411     __ sub(r30, r8, r15, ext::uxtw, 4);                //       sub     x30, x8, x15, uxtw #4
 412     __ subs(r19, r23, r19, ext::uxth, 4);              //       subs    x19, x23, x19, uxth #4
 413 
 414 // ConditionalCompareOp
 415     __ ccmnw(r29, r5, 10u, Assembler::LO);             //       ccmn    w29, w5, #10, LO
 416     __ ccmpw(r9, r13, 11u, Assembler::LO);             //       ccmp    w9, w13, #11, LO
 417     __ ccmn(r10, r4, 6u, Assembler::HS);               //       ccmn    x10, x4, #6, HS
 418     __ ccmp(r12, r2, 12u, Assembler::HI);              //       ccmp    x12, x2, #12, HI
 419 
 420 // ConditionalCompareImmedOp
 421     __ ccmnw(r16, 6, 2, Assembler::VS);                //       ccmn    w16, #6, #2, VS
 422     __ ccmpw(r7, 11, 13, Assembler::VS);               //       ccmp    w7, #11, #13, VS
 423     __ ccmn(r27, 10, 11, Assembler::LS);               //       ccmn    x27, #10, #11, LS
 424     __ ccmp(r3, 13, 13, Assembler::LE);                //       ccmp    x3, #13, #13, LE
 425 
 426 // ConditionalSelectOp
 427     __ cselw(r26, r27, r10, Assembler::VS);            //       csel    w26, w27, w10, VS
 428     __ csincw(r10, r21, r28, Assembler::LE);           //       csinc   w10, w21, w28, LE
 429     __ csinvw(r23, r9, r27, Assembler::LE);            //       csinv   w23, w9, w27, LE
 430     __ csnegw(r10, r29, r15, Assembler::LE);           //       csneg   w10, w29, w15, LE
 431     __ csel(r30, r25, r21, Assembler::HS);             //       csel    x30, x25, x21, HS
 432     __ csinc(r0, r17, r21, Assembler::GT);             //       csinc   x0, x17, x21, GT
 433     __ csinv(r16, r21, r20, Assembler::CS);            //       csinv   x16, x21, x20, CS
 434     __ csneg(r19, r30, r3, Assembler::LS);             //       csneg   x19, x30, x3, LS
 435 
 436 // TwoRegOp
 437     __ rbitw(r19, r11);                                //       rbit    w19, w11
 438     __ rev16w(r24, r0);                                //       rev16   w24, w0
 439     __ revw(r27, r25);                                 //       rev     w27, w25
 440     __ clzw(r14, r3);                                  //       clz     w14, w3
 441     __ clsw(r14, r17);                                 //       cls     w14, w17
 442     __ rbit(r7, r15);                                  //       rbit    x7, x15
 443     __ rev16(r24, r28);                                //       rev16   x24, x28
 444     __ rev32(r17, r25);                                //       rev32   x17, x25
 445     __ rev(r2, r26);                                   //       rev     x2, x26
 446     __ clz(r28, r5);                                   //       clz     x28, x5
 447     __ cls(r25, r26);                                  //       cls     x25, x26
 448     __ pacia(r27, r16);                                //       pacia   x27, x16
 449     __ pacib(r17, r6);                                 //       pacib   x17, x6
 450     __ pacda(r21, r12);                                //       pacda   x21, x12
 451     __ pacdb(r0, r4);                                  //       pacdb   x0, x4
 452     __ autia(r12, r27);                                //       autia   x12, x27
 453     __ autib(r17, r28);                                //       autib   x17, x28
 454     __ autda(r28, r2);                                 //       autda   x28, x2
 455     __ autdb(r17, r10);                                //       autdb   x17, x10
 456     __ braa(r15, r14);                                 //       braa    x15, x14
 457     __ brab(r14, r3);                                  //       brab    x14, x3
 458     __ blraa(r25, r15);                                //       blraa   x25, x15
 459     __ blrab(r19, r14);                                //       blrab   x19, x14
 460 
 461 // ThreeRegOp
 462     __ udivw(r5, r16, r4);                             //       udiv    w5, w16, w4
 463     __ sdivw(r26, r25, r4);                            //       sdiv    w26, w25, w4
 464     __ lslvw(r2, r2, r12);                             //       lslv    w2, w2, w12
 465     __ lsrvw(r29, r17, r8);                            //       lsrv    w29, w17, w8
 466     __ asrvw(r7, r3, r4);                              //       asrv    w7, w3, w4
 467     __ rorvw(r25, r4, r26);                            //       rorv    w25, w4, w26
 468     __ udiv(r25, r4, r17);                             //       udiv    x25, x4, x17
 469     __ sdiv(r0, r26, r17);                             //       sdiv    x0, x26, x17
 470     __ lslv(r23, r15, r21);                            //       lslv    x23, x15, x21
 471     __ lsrv(r28, r17, r27);                            //       lsrv    x28, x17, x27
 472     __ asrv(r10, r3, r0);                              //       asrv    x10, x3, x0
 473     __ rorv(r7, r25, r9);                              //       rorv    x7, x25, x9
 474     __ umulh(r6, r15, r29);                            //       umulh   x6, x15, x29
 475     __ smulh(r15, r10, r2);                            //       smulh   x15, x10, x2
 476 
 477 // FourRegMulOp
 478     __ maddw(r17, r7, r11, r11);                       //       madd    w17, w7, w11, w11
 479     __ msubw(r23, r7, r29, r23);                       //       msub    w23, w7, w29, w23
 480     __ madd(r14, r27, r11, r11);                       //       madd    x14, x27, x11, x11
 481     __ msub(r4, r24, r12, r15);                        //       msub    x4, x24, x12, x15
 482     __ smaddl(r14, r20, r11, r28);                     //       smaddl  x14, w20, w11, x28
 483     __ smsubl(r13, r11, r12, r23);                     //       smsubl  x13, w11, w12, x23
 484     __ umaddl(r30, r26, r14, r9);                      //       umaddl  x30, w26, w14, x9
 485     __ umsubl(r13, r10, r7, r5);                       //       umsubl  x13, w10, w7, x5
 486 
 487 // ThreeRegFloatOp
 488     __ fabdh(v30, v15, v3);                            //       fabd    h30, h15, h3
 489     __ fmulh(v12, v12, v16);                           //       fmul    h12, h12, h16
 490     __ fdivh(v31, v31, v18);                           //       fdiv    h31, h31, h18
 491     __ faddh(v19, v21, v16);                           //       fadd    h19, h21, h16
 492     __ fsubh(v15, v10, v21);                           //       fsub    h15, h10, h21
 493     __ fmaxh(v2, v10, v28);                            //       fmax    h2, h10, h28
 494     __ fminh(v7, v30, v31);                            //       fmin    h7, h30, h31
 495     __ fnmulh(v18, v1, v2);                            //       fnmul   h18, h1, h2
 496     __ fabds(v6, v10, v3);                             //       fabd    s6, s10, s3
 497     __ fmuls(v25, v11, v7);                            //       fmul    s25, s11, s7
 498     __ fdivs(v1, v12, v0);                             //       fdiv    s1, s12, s0
 499     __ fadds(v3, v19, v29);                            //       fadd    s3, s19, s29
 500     __ fsubs(v6, v23, v6);                             //       fsub    s6, s23, s6
 501     __ fmaxs(v0, v28, v27);                            //       fmax    s0, s28, s27
 502     __ fmins(v2, v5, v7);                              //       fmin    s2, s5, s7
 503     __ fnmuls(v29, v12, v25);                          //       fnmul   s29, s12, s25
 504     __ fabdd(v13, v12, v24);                           //       fabd    d13, d12, d24
 505     __ fmuld(v19, v8, v18);                            //       fmul    d19, d8, d18
 506     __ fdivd(v22, v26, v21);                           //       fdiv    d22, d26, d21
 507     __ faddd(v20, v19, v2);                            //       fadd    d20, d19, d2
 508     __ fsubd(v30, v22, v8);                            //       fsub    d30, d22, d8
 509     __ fmaxd(v22, v19, v21);                           //       fmax    d22, d19, d21
 510     __ fmind(v12, v18, v21);                           //       fmin    d12, d18, d21
 511     __ fnmuld(v6, v16, v3);                            //       fnmul   d6, d16, d3
 512 
 513 // FourRegFloatOp
 514     __ fmaddh(v3, v29, v3, v28);                       //       fmadd   h3, h29, h3, h28
 515     __ fmadds(v15, v14, v10, v13);                     //       fmadd   s15, s14, s10, s13
 516     __ fmsubs(v12, v18, v10, v26);                     //       fmsub   s12, s18, s10, s26
 517     __ fnmadds(v7, v7, v15, v29);                      //       fnmadd  s7, s7, s15, s29
 518     __ fnmadds(v0, v23, v0, v12);                      //       fnmadd  s0, s23, s0, s12
 519     __ fmaddd(v24, v14, v13, v8);                      //       fmadd   d24, d14, d13, d8
 520     __ fmsubd(v15, v7, v9, v20);                       //       fmsub   d15, d7, d9, d20
 521     __ fnmaddd(v19, v29, v31, v16);                    //       fnmadd  d19, d29, d31, d16
 522     __ fnmaddd(v2, v9, v16, v21);                      //       fnmadd  d2, d9, d16, d21
 523 
 524 // TwoRegFloatOp
 525     __ fmovs(v30, v4);                                 //       fmov    s30, s4
 526     __ fabss(v1, v27);                                 //       fabs    s1, s27
 527     __ fnegs(v25, v24);                                //       fneg    s25, s24
 528     __ fsqrts(v14, v21);                               //       fsqrt   s14, s21
 529     __ fcvts(v13, v6);                                 //       fcvt    d13, s6
 530     __ fcvtsh(v12, v25);                               //       fcvt    h12, s25
 531     __ fcvths(v25, v30);                               //       fcvt    s25, h30
 532     __ fmovd(v28, v21);                                //       fmov    d28, d21
 533     __ fabsd(v16, v23);                                //       fabs    d16, d23
 534     __ fnegd(v5, v29);                                 //       fneg    d5, d29
 535     __ fsqrtd(v22, v19);                               //       fsqrt   d22, d19
 536     __ fcvtd(v13, v20);                                //       fcvt    s13, d20
 537     __ fsqrth(v19, v28);                               //       fsqrt   h19, h28
 538 
 539 // FloatConvertOp
 540     __ fcvtzsw(r17, v6);                               //       fcvtzs  w17, s6
 541     __ fcvtzs(r13, v7);                                //       fcvtzs  x13, s7
 542     __ fcvtzdw(r28, v26);                              //       fcvtzs  w28, d26
 543     __ fcvtzd(r17, v6);                                //       fcvtzs  x17, d6
 544     __ scvtfws(v1, r4);                                //       scvtf   s1, w4
 545     __ scvtfs(v14, r20);                               //       scvtf   s14, x20
 546     __ scvtfwd(v7, r21);                               //       scvtf   d7, w21
 547     __ scvtfd(v27, r23);                               //       scvtf   d27, x23
 548     __ fcvtassw(r13, v20);                             //       fcvtas  w13, s20
 549     __ fcvtasd(r30, v28);                              //       fcvtas  x30, d28
 550     __ fcvtmssw(r10, v21);                             //       fcvtms  w10, s21
 551     __ fcvtmsd(r5, v17);                               //       fcvtms  x5, d17
 552     __ fmovs(r11, v14);                                //       fmov    w11, s14
 553     __ fmovd(r13, v21);                                //       fmov    x13, d21
 554     __ fmovs(v27, r14);                                //       fmov    s27, w14
 555     __ fmovd(v4, r23);                                 //       fmov    d4, x23
 556 
 557 // TwoRegFloatOp
 558     __ fcmps(v24, v30);                                //       fcmp    s24, s30
 559     __ fcmpd(v12, v14);                                //       fcmp    d12, d14
 560     __ fcmps(v17, 0.0);                                //       fcmp    s17, #0.0
 561     __ fcmpd(v28, 0.0);                                //       fcmp    d28, #0.0
 562 
 563 // LoadStorePairOp
 564     __ stpw(r0, r6, Address(r26, 16));                 //       stp     w0, w6, [x26, #16]
 565     __ ldpw(r0, r30, Address(r6, -32));                //       ldp     w0, w30, [x6, #-32]
 566     __ ldpsw(r16, r2, Address(r11, -208));             //       ldpsw   x16, x2, [x11, #-208]
 567     __ stp(r15, r0, Address(r12, 128));                //       stp     x15, x0, [x12, #128]
 568     __ ldp(r7, r30, Address(r23, 32));                 //       ldp     x7, x30, [x23, #32]
 569 
 570 // LoadStorePairOp
 571     __ stpw(r26, r15, Address(__ pre(r7, -256)));      //       stp     w26, w15, [x7, #-256]!
 572     __ ldpw(r11, r15, Address(__ pre(r10, -32)));      //       ldp     w11, w15, [x10, #-32]!
 573     __ ldpsw(r19, r16, Address(__ pre(r1, 64)));       //       ldpsw   x19, x16, [x1, #64]!
 574     __ stp(r14, r9, Address(__ pre(r0, 128)));         //       stp     x14, x9, [x0, #128]!
 575     __ ldp(r27, r3, Address(__ pre(r12, -96)));        //       ldp     x27, x3, [x12, #-96]!
 576 
 577 // LoadStorePairOp
 578     __ stpw(r8, r11, Address(__ post(r12, -256)));     //       stp     w8, w11, [x12], #-256
 579     __ ldpw(r10, r16, Address(__ post(r4, 64)));       //       ldp     w10, w16, [x4], #64
 580     __ ldpsw(r10, r30, Address(__ post(r19, -64)));    //       ldpsw   x10, x30, [x19], #-64
 581     __ stp(r24, r2, Address(__ post(r15, -96)));       //       stp     x24, x2, [x15], #-96
 582     __ ldp(r24, r10, Address(__ post(r16, 80)));       //       ldp     x24, x10, [x16], #80
 583 
 584 // LoadStorePairOp
 585     __ stnpw(r30, r21, Address(r29, 16));              //       stnp    w30, w21, [x29, #16]
 586     __ ldnpw(r8, r30, Address(r10, -112));             //       ldnp    w8, w30, [x10, #-112]
 587     __ stnp(r30, r26, Address(r6, -128));              //       stnp    x30, x26, [x6, #-128]
 588     __ ldnp(r24, r2, Address(r20, 64));                //       ldnp    x24, x2, [x20, #64]
 589 
 590 // LdStNEONOp
 591     __ ld1(v31, __ T8B, Address(r25));                 //       ld1     {v31.8B}, [x25]
 592     __ ld1(v5, v6, __ T16B, Address(__ post(r15, 32))); //      ld1     {v5.16B, v6.16B}, [x15], 32
 593     __ ld1(v10, v11, v12, __ T1D, Address(__ post(r7, r13))); //        ld1     {v10.1D, v11.1D, v12.1D}, [x7], x13
 594     __ ld1(v13, v14, v15, v16, __ T8H, Address(__ post(r16, 64))); //   ld1     {v13.8H, v14.8H, v15.8H, v16.8H}, [x16], 64
 595     __ ld1r(v7, __ T8B, Address(r17));                 //       ld1r    {v7.8B}, [x17]
 596     __ ld1r(v16, __ T4S, Address(__ post(r25, 4)));    //       ld1r    {v16.4S}, [x25], 4
 597     __ ld1r(v11, __ T1D, Address(__ post(r3, r7)));    //       ld1r    {v11.1D}, [x3], x7
 598     __ ld2(v13, v14, __ T2D, Address(r7));             //       ld2     {v13.2D, v14.2D}, [x7]
 599     __ ld2(v9, v10, __ T4H, Address(__ post(r27, 16))); //      ld2     {v9.4H, v10.4H}, [x27], 16
 600     __ ld2r(v6, v7, __ T16B, Address(r26));            //       ld2r    {v6.16B, v7.16B}, [x26]
 601     __ ld2r(v23, v24, __ T2S, Address(__ post(r16, 8))); //     ld2r    {v23.2S, v24.2S}, [x16], 8
 602     __ ld2r(v6, v7, __ T2D, Address(__ post(r13, r8))); //      ld2r    {v6.2D, v7.2D}, [x13], x8
 603     __ ld3(v20, v21, v22, __ T4S, Address(__ post(r1, r26))); //        ld3     {v20.4S, v21.4S, v22.4S}, [x1], x26
 604     __ ld3(v15, v16, v17, __ T2S, Address(r15));       //       ld3     {v15.2S, v16.2S, v17.2S}, [x15]
 605     __ ld3r(v29, v30, v31, __ T8H, Address(r22));      //       ld3r    {v29.8H, v30.8H, v31.8H}, [x22]
 606     __ ld3r(v6, v7, v8, __ T4S, Address(__ post(r10, 12))); //  ld3r    {v6.4S, v7.4S, v8.4S}, [x10], 12
 607     __ ld3r(v15, v16, v17, __ T1D, Address(__ post(r6, r15))); //       ld3r    {v15.1D, v16.1D, v17.1D}, [x6], x15
 608     __ ld4(v6, v7, v8, v9, __ T8H, Address(__ post(r10, 64))); //       ld4     {v6.8H, v7.8H, v8.8H, v9.8H}, [x10], 64
 609     __ ld4(v11, v12, v13, v14, __ T8B, Address(__ post(r3, r7))); //    ld4     {v11.8B, v12.8B, v13.8B, v14.8B}, [x3], x7
 610     __ ld4r(v12, v13, v14, v15, __ T8B, Address(r25)); //       ld4r    {v12.8B, v13.8B, v14.8B, v15.8B}, [x25]
 611     __ ld4r(v11, v12, v13, v14, __ T4H, Address(__ post(r15, 8))); //   ld4r    {v11.4H, v12.4H, v13.4H, v14.4H}, [x15], 8
 612     __ ld4r(v30, v31, v0, v1, __ T2S, Address(__ post(r6, r28))); //    ld4r    {v30.2S, v31.2S, v0.2S, v1.2S}, [x6], x28
 613 
 614 // NEONReduceInstruction
 615     __ addv(v27, __ T8B, v28);                         //       addv    b27, v28.8B
 616     __ addv(v28, __ T16B, v29);                        //       addv    b28, v29.16B
 617     __ addv(v1, __ T4H, v2);                           //       addv    h1, v2.4H
 618     __ addv(v28, __ T8H, v29);                         //       addv    h28, v29.8H
 619     __ addv(v1, __ T4S, v2);                           //       addv    s1, v2.4S
 620     __ smaxv(v20, __ T8B, v21);                        //       smaxv   b20, v21.8B
 621     __ smaxv(v29, __ T16B, v30);                       //       smaxv   b29, v30.16B
 622     __ smaxv(v16, __ T4H, v17);                        //       smaxv   h16, v17.4H
 623     __ smaxv(v13, __ T8H, v14);                        //       smaxv   h13, v14.8H
 624     __ smaxv(v10, __ T4S, v11);                        //       smaxv   s10, v11.4S
 625     __ fmaxv(v29, __ T4S, v30);                        //       fmaxv   s29, v30.4S
 626     __ sminv(v29, __ T8B, v30);                        //       sminv   b29, v30.8B
 627     __ uminv(v19, __ T8B, v20);                        //       uminv   b19, v20.8B
 628     __ sminv(v22, __ T16B, v23);                       //       sminv   b22, v23.16B
 629     __ uminv(v10, __ T16B, v11);                       //       uminv   b10, v11.16B
 630     __ sminv(v4, __ T4H, v5);                          //       sminv   h4, v5.4H
 631     __ uminv(v31, __ T4H, v0);                         //       uminv   h31, v0.4H
 632     __ sminv(v21, __ T8H, v22);                        //       sminv   h21, v22.8H
 633     __ uminv(v8, __ T8H, v9);                          //       uminv   h8, v9.8H
 634     __ sminv(v31, __ T4S, v0);                         //       sminv   s31, v0.4S
 635     __ uminv(v19, __ T4S, v20);                        //       uminv   s19, v20.4S
 636     __ fminv(v10, __ T4S, v11);                        //       fminv   s10, v11.4S
 637     __ fmaxp(v28, v29, __ S);                          //       fmaxp   s28, v29.2S
 638     __ fmaxp(v2, v3, __ D);                            //       fmaxp   d2, v3.2D
 639     __ fminp(v25, v26, __ S);                          //       fminp   s25, v26.2S
 640     __ fminp(v5, v6, __ D);                            //       fminp   d5, v6.2D
 641 
 642 // NEONFloatCompareWithZero
 643     __ fcm(Assembler::GT, v3, __ T2S, v4);             //       fcmgt   v3.2S, v4.2S, #0.0
 644     __ fcm(Assembler::GT, v8, __ T4S, v9);             //       fcmgt   v8.4S, v9.4S, #0.0
 645     __ fcm(Assembler::GT, v22, __ T2D, v23);           //       fcmgt   v22.2D, v23.2D, #0.0
 646     __ fcm(Assembler::GE, v19, __ T2S, v20);           //       fcmge   v19.2S, v20.2S, #0.0
 647     __ fcm(Assembler::GE, v13, __ T4S, v14);           //       fcmge   v13.4S, v14.4S, #0.0
 648     __ fcm(Assembler::GE, v5, __ T2D, v6);             //       fcmge   v5.2D, v6.2D, #0.0
 649     __ fcm(Assembler::EQ, v29, __ T2S, v30);           //       fcmeq   v29.2S, v30.2S, #0.0
 650     __ fcm(Assembler::EQ, v24, __ T4S, v25);           //       fcmeq   v24.4S, v25.4S, #0.0
 651     __ fcm(Assembler::EQ, v21, __ T2D, v22);           //       fcmeq   v21.2D, v22.2D, #0.0
 652     __ fcm(Assembler::LT, v26, __ T2S, v27);           //       fcmlt   v26.2S, v27.2S, #0.0
 653     __ fcm(Assembler::LT, v24, __ T4S, v25);           //       fcmlt   v24.4S, v25.4S, #0.0
 654     __ fcm(Assembler::LT, v3, __ T2D, v4);             //       fcmlt   v3.2D, v4.2D, #0.0
 655     __ fcm(Assembler::LE, v24, __ T2S, v25);           //       fcmle   v24.2S, v25.2S, #0.0
 656     __ fcm(Assembler::LE, v26, __ T4S, v27);           //       fcmle   v26.4S, v27.4S, #0.0
 657     __ fcm(Assembler::LE, v23, __ T2D, v24);           //       fcmle   v23.2D, v24.2D, #0.0
 658 
 659 // TwoRegNEONOp
 660     __ absr(v15, __ T8B, v16);                         //       abs     v15.8B, v16.8B
 661     __ absr(v21, __ T16B, v22);                        //       abs     v21.16B, v22.16B
 662     __ absr(v3, __ T4H, v4);                           //       abs     v3.4H, v4.4H
 663     __ absr(v24, __ T8H, v25);                         //       abs     v24.8H, v25.8H
 664     __ absr(v8, __ T2S, v9);                           //       abs     v8.2S, v9.2S
 665     __ absr(v25, __ T4S, v26);                         //       abs     v25.4S, v26.4S
 666     __ absr(v20, __ T2D, v21);                         //       abs     v20.2D, v21.2D
 667     __ fabs(v16, __ T2S, v17);                         //       fabs    v16.2S, v17.2S
 668     __ fabs(v17, __ T4S, v18);                         //       fabs    v17.4S, v18.4S
 669     __ fabs(v2, __ T2D, v3);                           //       fabs    v2.2D, v3.2D
 670     __ fabs(v1, __ T4H, v2);                           //       fabs    v1.4H, v2.4H
 671     __ fabs(v0, __ T8H, v1);                           //       fabs    v0.8H, v1.8H
 672     __ fneg(v24, __ T2S, v25);                         //       fneg    v24.2S, v25.2S
 673     __ fneg(v4, __ T4S, v5);                           //       fneg    v4.4S, v5.4S
 674     __ fneg(v3, __ T2D, v4);                           //       fneg    v3.2D, v4.2D
 675     __ fneg(v12, __ T4H, v13);                         //       fneg    v12.4H, v13.4H
 676     __ fneg(v31, __ T8H, v0);                          //       fneg    v31.8H, v0.8H
 677     __ fsqrt(v28, __ T2S, v29);                        //       fsqrt   v28.2S, v29.2S
 678     __ fsqrt(v10, __ T4S, v11);                        //       fsqrt   v10.4S, v11.4S
 679     __ fsqrt(v26, __ T2D, v27);                        //       fsqrt   v26.2D, v27.2D
 680     __ fsqrt(v2, __ T4H, v3);                          //       fsqrt   v2.4H, v3.4H
 681     __ fsqrt(v12, __ T8H, v13);                        //       fsqrt   v12.8H, v13.8H
 682     __ notr(v18, __ T8B, v19);                         //       not     v18.8B, v19.8B
 683     __ notr(v31, __ T16B, v0);                         //       not     v31.16B, v0.16B
 684 
 685 // ThreeRegNEONOp
 686     __ andr(v1, __ T8B, v2, v3);                       //       and     v1.8B, v2.8B, v3.8B
 687     __ andr(v13, __ T16B, v14, v15);                   //       and     v13.16B, v14.16B, v15.16B
 688     __ orr(v29, __ T8B, v30, v31);                     //       orr     v29.8B, v30.8B, v31.8B
 689     __ orr(v0, __ T16B, v1, v2);                       //       orr     v0.16B, v1.16B, v2.16B
 690     __ eor(v19, __ T8B, v20, v21);                     //       eor     v19.8B, v20.8B, v21.8B
 691     __ eor(v12, __ T16B, v13, v14);                    //       eor     v12.16B, v13.16B, v14.16B
 692     __ addv(v17, __ T8B, v18, v19);                    //       add     v17.8B, v18.8B, v19.8B
 693     __ addv(v22, __ T16B, v23, v24);                   //       add     v22.16B, v23.16B, v24.16B
 694     __ addv(v13, __ T4H, v14, v15);                    //       add     v13.4H, v14.4H, v15.4H
 695     __ addv(v28, __ T8H, v29, v30);                    //       add     v28.8H, v29.8H, v30.8H
 696     __ addv(v30, __ T2S, v31, v0);                     //       add     v30.2S, v31.2S, v0.2S
 697     __ addv(v31, __ T4S, v0, v1);                      //       add     v31.4S, v0.4S, v1.4S
 698     __ addv(v1, __ T2D, v2, v3);                       //       add     v1.2D, v2.2D, v3.2D
 699     __ sqaddv(v26, __ T8B, v27, v28);                  //       sqadd   v26.8B, v27.8B, v28.8B
 700     __ sqaddv(v28, __ T16B, v29, v30);                 //       sqadd   v28.16B, v29.16B, v30.16B
 701     __ sqaddv(v4, __ T4H, v5, v6);                     //       sqadd   v4.4H, v5.4H, v6.4H
 702     __ sqaddv(v30, __ T8H, v31, v0);                   //       sqadd   v30.8H, v31.8H, v0.8H
 703     __ sqaddv(v4, __ T2S, v5, v6);                     //       sqadd   v4.2S, v5.2S, v6.2S
 704     __ sqaddv(v6, __ T4S, v7, v8);                     //       sqadd   v6.4S, v7.4S, v8.4S
 705     __ sqaddv(v30, __ T2D, v31, v0);                   //       sqadd   v30.2D, v31.2D, v0.2D
 706     __ uqaddv(v26, __ T8B, v27, v28);                  //       uqadd   v26.8B, v27.8B, v28.8B
 707     __ uqaddv(v18, __ T16B, v19, v20);                 //       uqadd   v18.16B, v19.16B, v20.16B
 708     __ uqaddv(v9, __ T4H, v10, v11);                   //       uqadd   v9.4H, v10.4H, v11.4H
 709     __ uqaddv(v8, __ T8H, v9, v10);                    //       uqadd   v8.8H, v9.8H, v10.8H
 710     __ uqaddv(v12, __ T2S, v13, v14);                  //       uqadd   v12.2S, v13.2S, v14.2S
 711     __ uqaddv(v0, __ T4S, v1, v2);                     //       uqadd   v0.4S, v1.4S, v2.4S
 712     __ uqaddv(v20, __ T2D, v21, v22);                  //       uqadd   v20.2D, v21.2D, v22.2D
 713     __ fadd(v1, __ T2S, v2, v3);                       //       fadd    v1.2S, v2.2S, v3.2S
 714     __ fadd(v24, __ T4S, v25, v26);                    //       fadd    v24.4S, v25.4S, v26.4S
 715     __ fadd(v2, __ T2D, v3, v4);                       //       fadd    v2.2D, v3.2D, v4.2D
 716     __ fadd(v0, __ T4H, v1, v2);                       //       fadd    v0.4H, v1.4H, v2.4H
 717     __ fadd(v9, __ T8H, v10, v11);                     //       fadd    v9.8H, v10.8H, v11.8H
 718     __ subv(v24, __ T8B, v25, v26);                    //       sub     v24.8B, v25.8B, v26.8B
 719     __ subv(v26, __ T16B, v27, v28);                   //       sub     v26.16B, v27.16B, v28.16B
 720     __ subv(v16, __ T4H, v17, v18);                    //       sub     v16.4H, v17.4H, v18.4H
 721     __ subv(v30, __ T8H, v31, v0);                     //       sub     v30.8H, v31.8H, v0.8H
 722     __ subv(v3, __ T2S, v4, v5);                       //       sub     v3.2S, v4.2S, v5.2S
 723     __ subv(v10, __ T4S, v11, v12);                    //       sub     v10.4S, v11.4S, v12.4S
 724     __ subv(v23, __ T2D, v24, v25);                    //       sub     v23.2D, v24.2D, v25.2D
 725     __ sqsubv(v10, __ T8B, v11, v12);                  //       sqsub   v10.8B, v11.8B, v12.8B
 726     __ sqsubv(v4, __ T16B, v5, v6);                    //       sqsub   v4.16B, v5.16B, v6.16B
 727     __ sqsubv(v18, __ T4H, v19, v20);                  //       sqsub   v18.4H, v19.4H, v20.4H
 728     __ sqsubv(v2, __ T8H, v3, v4);                     //       sqsub   v2.8H, v3.8H, v4.8H
 729     __ sqsubv(v11, __ T2S, v12, v13);                  //       sqsub   v11.2S, v12.2S, v13.2S
 730     __ sqsubv(v8, __ T4S, v9, v10);                    //       sqsub   v8.4S, v9.4S, v10.4S
 731     __ sqsubv(v10, __ T2D, v11, v12);                  //       sqsub   v10.2D, v11.2D, v12.2D
 732     __ uqsubv(v15, __ T8B, v16, v17);                  //       uqsub   v15.8B, v16.8B, v17.8B
 733     __ uqsubv(v17, __ T16B, v18, v19);                 //       uqsub   v17.16B, v18.16B, v19.16B
 734     __ uqsubv(v2, __ T4H, v3, v4);                     //       uqsub   v2.4H, v3.4H, v4.4H
 735     __ uqsubv(v10, __ T8H, v11, v12);                  //       uqsub   v10.8H, v11.8H, v12.8H
 736     __ uqsubv(v12, __ T2S, v13, v14);                  //       uqsub   v12.2S, v13.2S, v14.2S
 737     __ uqsubv(v12, __ T4S, v13, v14);                  //       uqsub   v12.4S, v13.4S, v14.4S
 738     __ uqsubv(v15, __ T2D, v16, v17);                  //       uqsub   v15.2D, v16.2D, v17.2D
 739     __ fsub(v13, __ T2S, v14, v15);                    //       fsub    v13.2S, v14.2S, v15.2S
 740     __ fsub(v2, __ T4S, v3, v4);                       //       fsub    v2.4S, v3.4S, v4.4S
 741     __ fsub(v7, __ T2D, v8, v9);                       //       fsub    v7.2D, v8.2D, v9.2D
 742     __ fsub(v20, __ T4H, v21, v22);                    //       fsub    v20.4H, v21.4H, v22.4H
 743     __ fsub(v26, __ T8H, v27, v28);                    //       fsub    v26.8H, v27.8H, v28.8H
 744     __ mulv(v16, __ T8B, v17, v18);                    //       mul     v16.8B, v17.8B, v18.8B
 745     __ mulv(v4, __ T16B, v5, v6);                      //       mul     v4.16B, v5.16B, v6.16B
 746     __ mulv(v2, __ T4H, v3, v4);                       //       mul     v2.4H, v3.4H, v4.4H
 747     __ mulv(v4, __ T8H, v5, v6);                       //       mul     v4.8H, v5.8H, v6.8H
 748     __ mulv(v12, __ T2S, v13, v14);                    //       mul     v12.2S, v13.2S, v14.2S
 749     __ mulv(v18, __ T4S, v19, v20);                    //       mul     v18.4S, v19.4S, v20.4S
 750     __ fabd(v21, __ T2S, v22, v23);                    //       fabd    v21.2S, v22.2S, v23.2S
 751     __ fabd(v16, __ T4S, v17, v18);                    //       fabd    v16.4S, v17.4S, v18.4S
 752     __ fabd(v18, __ T2D, v19, v20);                    //       fabd    v18.2D, v19.2D, v20.2D
 753     __ fabd(v11, __ T4H, v12, v13);                    //       fabd    v11.4H, v12.4H, v13.4H
 754     __ fabd(v21, __ T8H, v22, v23);                    //       fabd    v21.8H, v22.8H, v23.8H
 755     __ faddp(v23, __ T2S, v24, v25);                   //       faddp   v23.2S, v24.2S, v25.2S
 756     __ faddp(v12, __ T4S, v13, v14);                   //       faddp   v12.4S, v13.4S, v14.4S
 757     __ faddp(v26, __ T2D, v27, v28);                   //       faddp   v26.2D, v27.2D, v28.2D
 758     __ faddp(v23, __ T4H, v24, v25);                   //       faddp   v23.4H, v24.4H, v25.4H
 759     __ faddp(v28, __ T8H, v29, v30);                   //       faddp   v28.8H, v29.8H, v30.8H
 760     __ fmul(v14, __ T2S, v15, v16);                    //       fmul    v14.2S, v15.2S, v16.2S
 761     __ fmul(v11, __ T4S, v12, v13);                    //       fmul    v11.4S, v12.4S, v13.4S
 762     __ fmul(v24, __ T2D, v25, v26);                    //       fmul    v24.2D, v25.2D, v26.2D
 763     __ fmul(v1, __ T4H, v2, v3);                       //       fmul    v1.4H, v2.4H, v3.4H
 764     __ fmul(v12, __ T8H, v13, v14);                    //       fmul    v12.8H, v13.8H, v14.8H
 765     __ mlav(v31, __ T4H, v0, v1);                      //       mla     v31.4H, v0.4H, v1.4H
 766     __ mlav(v10, __ T8H, v11, v12);                    //       mla     v10.8H, v11.8H, v12.8H
 767     __ mlav(v16, __ T2S, v17, v18);                    //       mla     v16.2S, v17.2S, v18.2S
 768     __ mlav(v7, __ T4S, v8, v9);                       //       mla     v7.4S, v8.4S, v9.4S
 769     __ fmla(v2, __ T2S, v3, v4);                       //       fmla    v2.2S, v3.2S, v4.2S
 770     __ fmla(v3, __ T4S, v4, v5);                       //       fmla    v3.4S, v4.4S, v5.4S
 771     __ fmla(v13, __ T2D, v14, v15);                    //       fmla    v13.2D, v14.2D, v15.2D
 772     __ fmla(v19, __ T4H, v20, v21);                    //       fmla    v19.4H, v20.4H, v21.4H
 773     __ fmla(v17, __ T8H, v18, v19);                    //       fmla    v17.8H, v18.8H, v19.8H
 774     __ mlsv(v16, __ T4H, v17, v18);                    //       mls     v16.4H, v17.4H, v18.4H
 775     __ mlsv(v3, __ T8H, v4, v5);                       //       mls     v3.8H, v4.8H, v5.8H
 776     __ mlsv(v1, __ T2S, v2, v3);                       //       mls     v1.2S, v2.2S, v3.2S
 777     __ mlsv(v11, __ T4S, v12, v13);                    //       mls     v11.4S, v12.4S, v13.4S
 778     __ fmls(v30, __ T2S, v31, v0);                     //       fmls    v30.2S, v31.2S, v0.2S
 779     __ fmls(v5, __ T4S, v6, v7);                       //       fmls    v5.4S, v6.4S, v7.4S
 780     __ fmls(v8, __ T2D, v9, v10);                      //       fmls    v8.2D, v9.2D, v10.2D
 781     __ fmls(v15, __ T4H, v16, v17);                    //       fmls    v15.4H, v16.4H, v17.4H
 782     __ fmls(v29, __ T8H, v30, v31);                    //       fmls    v29.8H, v30.8H, v31.8H
 783     __ fdiv(v30, __ T2S, v31, v0);                     //       fdiv    v30.2S, v31.2S, v0.2S
    // (generated) NEON three-register, same-arrangement ops, continued:
    // fdiv, smax/umax (maxv/umaxv), smaxp, fmax, smin/umin (minv/uminv),
    // sminp, sqdmulh, shsub, fmin, facgt. The trailing comment on each line
    // is the expected disassembly that the asmtest harness checks against.
    __ fdiv(v0, __ T4S, v1, v2);                       //       fdiv    v0.4S, v1.4S, v2.4S
    __ fdiv(v20, __ T2D, v21, v22);                    //       fdiv    v20.2D, v21.2D, v22.2D
    __ fdiv(v7, __ T4H, v8, v9);                       //       fdiv    v7.4H, v8.4H, v9.4H
    __ fdiv(v20, __ T8H, v21, v22);                    //       fdiv    v20.8H, v21.8H, v22.8H
    __ maxv(v23, __ T8B, v24, v25);                    //       smax    v23.8B, v24.8B, v25.8B
    __ maxv(v28, __ T16B, v29, v30);                   //       smax    v28.16B, v29.16B, v30.16B
    __ maxv(v21, __ T4H, v22, v23);                    //       smax    v21.4H, v22.4H, v23.4H
    __ maxv(v27, __ T8H, v28, v29);                    //       smax    v27.8H, v28.8H, v29.8H
    __ maxv(v25, __ T2S, v26, v27);                    //       smax    v25.2S, v26.2S, v27.2S
    __ maxv(v5, __ T4S, v6, v7);                       //       smax    v5.4S, v6.4S, v7.4S
    __ umaxv(v1, __ T8B, v2, v3);                      //       umax    v1.8B, v2.8B, v3.8B
    __ umaxv(v23, __ T16B, v24, v25);                  //       umax    v23.16B, v24.16B, v25.16B
    __ umaxv(v16, __ T4H, v17, v18);                   //       umax    v16.4H, v17.4H, v18.4H
    __ umaxv(v31, __ T8H, v0, v1);                     //       umax    v31.8H, v0.8H, v1.8H
    __ umaxv(v5, __ T2S, v6, v7);                      //       umax    v5.2S, v6.2S, v7.2S
    __ umaxv(v12, __ T4S, v13, v14);                   //       umax    v12.4S, v13.4S, v14.4S
    __ smaxp(v9, __ T8B, v10, v11);                    //       smaxp   v9.8B, v10.8B, v11.8B
    __ smaxp(v28, __ T16B, v29, v30);                  //       smaxp   v28.16B, v29.16B, v30.16B
    __ smaxp(v15, __ T4H, v16, v17);                   //       smaxp   v15.4H, v16.4H, v17.4H
    __ smaxp(v29, __ T8H, v30, v31);                   //       smaxp   v29.8H, v30.8H, v31.8H
    __ smaxp(v22, __ T2S, v23, v24);                   //       smaxp   v22.2S, v23.2S, v24.2S
    __ smaxp(v31, __ T4S, v0, v1);                     //       smaxp   v31.4S, v0.4S, v1.4S
    __ fmax(v19, __ T2S, v20, v21);                    //       fmax    v19.2S, v20.2S, v21.2S
    __ fmax(v31, __ T4S, v0, v1);                      //       fmax    v31.4S, v0.4S, v1.4S
    __ fmax(v5, __ T2D, v6, v7);                       //       fmax    v5.2D, v6.2D, v7.2D
    __ fmax(v14, __ T4H, v15, v16);                    //       fmax    v14.4H, v15.4H, v16.4H
    __ fmax(v18, __ T8H, v19, v20);                    //       fmax    v18.8H, v19.8H, v20.8H
    __ minv(v31, __ T8B, v0, v1);                      //       smin    v31.8B, v0.8B, v1.8B
    __ minv(v18, __ T16B, v19, v20);                   //       smin    v18.16B, v19.16B, v20.16B
    __ minv(v27, __ T4H, v28, v29);                    //       smin    v27.4H, v28.4H, v29.4H
    __ minv(v20, __ T8H, v21, v22);                    //       smin    v20.8H, v21.8H, v22.8H
    __ minv(v16, __ T2S, v17, v18);                    //       smin    v16.2S, v17.2S, v18.2S
    __ minv(v12, __ T4S, v13, v14);                    //       smin    v12.4S, v13.4S, v14.4S
    __ uminv(v11, __ T8B, v12, v13);                   //       umin    v11.8B, v12.8B, v13.8B
    __ uminv(v9, __ T16B, v10, v11);                   //       umin    v9.16B, v10.16B, v11.16B
    __ uminv(v6, __ T4H, v7, v8);                      //       umin    v6.4H, v7.4H, v8.4H
    __ uminv(v30, __ T8H, v31, v0);                    //       umin    v30.8H, v31.8H, v0.8H
    __ uminv(v17, __ T2S, v18, v19);                   //       umin    v17.2S, v18.2S, v19.2S
    __ uminv(v27, __ T4S, v28, v29);                   //       umin    v27.4S, v28.4S, v29.4S
    __ sminp(v28, __ T8B, v29, v30);                   //       sminp   v28.8B, v29.8B, v30.8B
    __ sminp(v30, __ T16B, v31, v0);                   //       sminp   v30.16B, v31.16B, v0.16B
    __ sminp(v7, __ T4H, v8, v9);                      //       sminp   v7.4H, v8.4H, v9.4H
    __ sminp(v10, __ T8H, v11, v12);                   //       sminp   v10.8H, v11.8H, v12.8H
    __ sminp(v20, __ T2S, v21, v22);                   //       sminp   v20.2S, v21.2S, v22.2S
    __ sminp(v10, __ T4S, v11, v12);                   //       sminp   v10.4S, v11.4S, v12.4S
    __ sqdmulh(v4, __ T4H, v5, v6);                    //       sqdmulh v4.4H, v5.4H, v6.4H
    __ sqdmulh(v24, __ T8H, v25, v26);                 //       sqdmulh v24.8H, v25.8H, v26.8H
    __ sqdmulh(v17, __ T2S, v18, v19);                 //       sqdmulh v17.2S, v18.2S, v19.2S
    __ sqdmulh(v17, __ T4S, v18, v19);                 //       sqdmulh v17.4S, v18.4S, v19.4S
    __ shsubv(v22, __ T8B, v23, v24);                  //       shsub   v22.8B, v23.8B, v24.8B
    __ shsubv(v3, __ T16B, v4, v5);                    //       shsub   v3.16B, v4.16B, v5.16B
    __ shsubv(v29, __ T4H, v30, v31);                  //       shsub   v29.4H, v30.4H, v31.4H
    __ shsubv(v15, __ T8H, v16, v17);                  //       shsub   v15.8H, v16.8H, v17.8H
    __ shsubv(v22, __ T2S, v23, v24);                  //       shsub   v22.2S, v23.2S, v24.2S
    __ shsubv(v19, __ T4S, v20, v21);                  //       shsub   v19.4S, v20.4S, v21.4S
    __ fmin(v19, __ T2S, v20, v21);                    //       fmin    v19.2S, v20.2S, v21.2S
    __ fmin(v22, __ T4S, v23, v24);                    //       fmin    v22.4S, v23.4S, v24.4S
    __ fmin(v2, __ T2D, v3, v4);                       //       fmin    v2.2D, v3.2D, v4.2D
    __ fmin(v15, __ T4H, v16, v17);                    //       fmin    v15.4H, v16.4H, v17.4H
    __ fmin(v6, __ T8H, v7, v8);                       //       fmin    v6.8H, v7.8H, v8.8H
    __ facgt(v12, __ T2S, v13, v14);                   //       facgt   v12.2S, v13.2S, v14.2S
    __ facgt(v16, __ T4S, v17, v18);                   //       facgt   v16.4S, v17.4S, v18.4S
    __ facgt(v11, __ T2D, v12, v13);                   //       facgt   v11.2D, v12.2D, v13.2D
    __ facgt(v13, __ T4H, v14, v15);                   //       facgt   v13.4H, v14.4H, v15.4H
    __ facgt(v23, __ T8H, v24, v25);                   //       facgt   v23.8H, v24.8H, v25.8H
 849 
// VectorScalarNEONInstruction
    // (generated) Vector-by-scalar-element forms (fmla/fmls/fmulx/mul): the
    // trailing integer argument selects the lane of the last vector operand,
    // as shown in the expected disassembly (e.g. v4.S[2], v6.H[4] below).
    __ fmlavs(v15, __ T2S, v0, v1, 0);                 //       fmla    v15.2S, v0.2S, v1.S[0]
    __ mulvs(v2, __ T4S, v3, v4, 2);                   //       mul     v2.4S, v3.4S, v4.S[2]
    __ fmlavs(v1, __ T2D, v2, v3, 1);                  //       fmla    v1.2D, v2.2D, v3.D[1]
    __ fmlsvs(v11, __ T2S, v12, v13, 1);               //       fmls    v11.2S, v12.2S, v13.S[1]
    __ mulvs(v5, __ T4S, v6, v7, 1);                   //       mul     v5.4S, v6.4S, v7.S[1]
    __ fmlsvs(v14, __ T2D, v15, v16, 1);               //       fmls    v14.2D, v15.2D, v16.D[1]
    __ fmulxvs(v6, __ T2S, v7, v8, 1);                 //       fmulx   v6.2S, v7.2S, v8.S[1]
    __ mulvs(v1, __ T4S, v2, v3, 3);                   //       mul     v1.4S, v2.4S, v3.S[3]
    __ fmulxvs(v15, __ T2D, v0, v1, 0);                //       fmulx   v15.2D, v0.2D, v1.D[0]
    __ mulvs(v9, __ T4H, v10, v11, 3);                 //       mul     v9.4H, v10.4H, v11.H[3]
    __ mulvs(v4, __ T8H, v5, v6, 4);                   //       mul     v4.8H, v5.8H, v6.H[4]
    __ mulvs(v13, __ T2S, v14, v15, 1);                //       mul     v13.2S, v14.2S, v15.S[1]
    __ mulvs(v3, __ T4S, v4, v5, 1);                   //       mul     v3.4S, v4.4S, v5.S[1]
 864 
// NEONVectorCompare
    // (generated) Condition-dispatched NEON compares. Integer cm() maps
    // GT/GE/EQ/HI/HS to cmgt/cmge/cmeq/cmhi/cmhs; floating-point fcm() maps
    // EQ/GT/GE to fcmeq/fcmgt/fcmge, as shown by each expected disassembly.
    __ cm(Assembler::GT, v21, __ T8B, v22, v23);       //       cmgt    v21.8B, v22.8B, v23.8B
    __ cm(Assembler::GT, v23, __ T16B, v24, v25);      //       cmgt    v23.16B, v24.16B, v25.16B
    __ cm(Assembler::GT, v31, __ T4H, v0, v1);         //       cmgt    v31.4H, v0.4H, v1.4H
    __ cm(Assembler::GT, v25, __ T8H, v26, v27);       //       cmgt    v25.8H, v26.8H, v27.8H
    __ cm(Assembler::GT, v2, __ T2S, v3, v4);          //       cmgt    v2.2S, v3.2S, v4.2S
    __ cm(Assembler::GT, v31, __ T4S, v0, v1);         //       cmgt    v31.4S, v0.4S, v1.4S
    __ cm(Assembler::GT, v27, __ T2D, v28, v29);       //       cmgt    v27.2D, v28.2D, v29.2D
    __ cm(Assembler::GE, v18, __ T8B, v19, v20);       //       cmge    v18.8B, v19.8B, v20.8B
    __ cm(Assembler::GE, v10, __ T16B, v11, v12);      //       cmge    v10.16B, v11.16B, v12.16B
    __ cm(Assembler::GE, v23, __ T4H, v24, v25);       //       cmge    v23.4H, v24.4H, v25.4H
    __ cm(Assembler::GE, v19, __ T8H, v20, v21);       //       cmge    v19.8H, v20.8H, v21.8H
    __ cm(Assembler::GE, v3, __ T2S, v4, v5);          //       cmge    v3.2S, v4.2S, v5.2S
    __ cm(Assembler::GE, v18, __ T4S, v19, v20);       //       cmge    v18.4S, v19.4S, v20.4S
    __ cm(Assembler::GE, v0, __ T2D, v1, v2);          //       cmge    v0.2D, v1.2D, v2.2D
    __ cm(Assembler::EQ, v25, __ T8B, v26, v27);       //       cmeq    v25.8B, v26.8B, v27.8B
    __ cm(Assembler::EQ, v26, __ T16B, v27, v28);      //       cmeq    v26.16B, v27.16B, v28.16B
    __ cm(Assembler::EQ, v23, __ T4H, v24, v25);       //       cmeq    v23.4H, v24.4H, v25.4H
    __ cm(Assembler::EQ, v2, __ T8H, v3, v4);          //       cmeq    v2.8H, v3.8H, v4.8H
    __ cm(Assembler::EQ, v18, __ T2S, v19, v20);       //       cmeq    v18.2S, v19.2S, v20.2S
    __ cm(Assembler::EQ, v12, __ T4S, v13, v14);       //       cmeq    v12.4S, v13.4S, v14.4S
    __ cm(Assembler::EQ, v4, __ T2D, v5, v6);          //       cmeq    v4.2D, v5.2D, v6.2D
    __ cm(Assembler::HI, v28, __ T8B, v29, v30);       //       cmhi    v28.8B, v29.8B, v30.8B
    __ cm(Assembler::HI, v30, __ T16B, v31, v0);       //       cmhi    v30.16B, v31.16B, v0.16B
    __ cm(Assembler::HI, v29, __ T4H, v30, v31);       //       cmhi    v29.4H, v30.4H, v31.4H
    __ cm(Assembler::HI, v16, __ T8H, v17, v18);       //       cmhi    v16.8H, v17.8H, v18.8H
    __ cm(Assembler::HI, v27, __ T2S, v28, v29);       //       cmhi    v27.2S, v28.2S, v29.2S
    __ cm(Assembler::HI, v6, __ T4S, v7, v8);          //       cmhi    v6.4S, v7.4S, v8.4S
    __ cm(Assembler::HI, v9, __ T2D, v10, v11);        //       cmhi    v9.2D, v10.2D, v11.2D
    __ cm(Assembler::HS, v29, __ T8B, v30, v31);       //       cmhs    v29.8B, v30.8B, v31.8B
    __ cm(Assembler::HS, v18, __ T16B, v19, v20);      //       cmhs    v18.16B, v19.16B, v20.16B
    __ cm(Assembler::HS, v7, __ T4H, v8, v9);          //       cmhs    v7.4H, v8.4H, v9.4H
    __ cm(Assembler::HS, v4, __ T8H, v5, v6);          //       cmhs    v4.8H, v5.8H, v6.8H
    __ cm(Assembler::HS, v7, __ T2S, v8, v9);          //       cmhs    v7.2S, v8.2S, v9.2S
    __ cm(Assembler::HS, v15, __ T4S, v16, v17);       //       cmhs    v15.4S, v16.4S, v17.4S
    __ cm(Assembler::HS, v9, __ T2D, v10, v11);        //       cmhs    v9.2D, v10.2D, v11.2D
    __ fcm(Assembler::EQ, v23, __ T2S, v24, v25);      //       fcmeq   v23.2S, v24.2S, v25.2S
    __ fcm(Assembler::EQ, v8, __ T4S, v9, v10);        //       fcmeq   v8.4S, v9.4S, v10.4S
    __ fcm(Assembler::EQ, v2, __ T2D, v3, v4);         //       fcmeq   v2.2D, v3.2D, v4.2D
    __ fcm(Assembler::GT, v28, __ T2S, v29, v30);      //       fcmgt   v28.2S, v29.2S, v30.2S
    __ fcm(Assembler::GT, v21, __ T4S, v22, v23);      //       fcmgt   v21.4S, v22.4S, v23.4S
    __ fcm(Assembler::GT, v31, __ T2D, v0, v1);        //       fcmgt   v31.2D, v0.2D, v1.2D
    __ fcm(Assembler::GE, v5, __ T2S, v6, v7);         //       fcmge   v5.2S, v6.2S, v7.2S
    __ fcm(Assembler::GE, v27, __ T4S, v28, v29);      //       fcmge   v27.4S, v28.4S, v29.4S
    __ fcm(Assembler::GE, v0, __ T2D, v1, v2);         //       fcmge   v0.2D, v1.2D, v2.2D
 910 
// SVEComparisonWithZero
    // (generated) SVE predicated FP compares against the immediate #0.0;
    // the condition maps to fcmeq/fcmgt/fcmge/fcmlt/fcmle/fcmne and the
    // governing predicate is used in zeroing form (p<n>/z), per disassembly.
    __ sve_fcm(Assembler::EQ, p8, __ S, p6, z15, 0.0); //       fcmeq   p8.s, p6/z, z15.s, #0.0
    __ sve_fcm(Assembler::GT, p4, __ D, p6, z28, 0.0); //       fcmgt   p4.d, p6/z, z28.d, #0.0
    __ sve_fcm(Assembler::GE, p13, __ D, p0, z25, 0.0); //      fcmge   p13.d, p0/z, z25.d, #0.0
    __ sve_fcm(Assembler::LT, p2, __ D, p0, z6, 0.0);  //       fcmlt   p2.d, p0/z, z6.d, #0.0
    __ sve_fcm(Assembler::LE, p2, __ S, p2, z15, 0.0); //       fcmle   p2.s, p2/z, z15.s, #0.0
    __ sve_fcm(Assembler::NE, p3, __ S, p7, z5, 0.0);  //       fcmne   p3.s, p7/z, z5.s, #0.0
 918 
// SVEComparisonWithImm
    // (generated) SVE predicated integer compares with an immediate. Signed
    // conditions (EQ/GT/GE/LT/LE/NE) here use small signed immediates, while
    // the unsigned conditions (HS/HI/LS/LO) use unsigned ones (90, 66, ...).
    __ sve_cmp(Assembler::EQ, p3, __ S, p5, z20, -10); //       cmpeq   p3.s, p5/z, z20.s, #-10
    __ sve_cmp(Assembler::GT, p5, __ S, p7, z8, -10);  //       cmpgt   p5.s, p7/z, z8.s, #-10
    __ sve_cmp(Assembler::GE, p8, __ H, p7, z2, 13);   //       cmpge   p8.h, p7/z, z2.h, #13
    __ sve_cmp(Assembler::LT, p1, __ S, p7, z27, -2);  //       cmplt   p1.s, p7/z, z27.s, #-2
    __ sve_cmp(Assembler::LE, p6, __ S, p6, z28, -11); //       cmple   p6.s, p6/z, z28.s, #-11
    __ sve_cmp(Assembler::NE, p1, __ H, p4, z14, -5);  //       cmpne   p1.h, p4/z, z14.h, #-5
    __ sve_cmp(Assembler::HS, p13, __ H, p1, z23, 90); //       cmphs   p13.h, p1/z, z23.h, #90
    __ sve_cmp(Assembler::HI, p8, __ B, p4, z4, 66);   //       cmphi   p8.b, p4/z, z4.b, #66
    __ sve_cmp(Assembler::LS, p9, __ H, p3, z13, 11);  //       cmpls   p9.h, p3/z, z13.h, #11
    __ sve_cmp(Assembler::LO, p8, __ S, p5, z3, 21);   //       cmplo   p8.s, p5/z, z3.s, #21
 930 
// SpecialCases
    // (generated) Hand-picked encoding corner cases: zr/sp operands in
    // conditional compares and exclusive stores, vector element moves
    // to/from zr, and a broad sweep of SVE forms (predicated shifts,
    // contiguous and gather/scatter memory ops, ptrue patterns, conversions).
    __ ccmn(zr, zr, 3u, Assembler::LE);                //       ccmn    xzr, xzr, #3, LE
    __ ccmnw(zr, zr, 5u, Assembler::EQ);               //       ccmn    wzr, wzr, #5, EQ
    __ ccmp(zr, 1, 4u, Assembler::NE);                 //       ccmp    xzr, 1, #4, NE
    __ ccmpw(zr, 2, 2, Assembler::GT);                 //       ccmp    wzr, 2, #2, GT
    __ extr(zr, zr, zr, 0);                            //       extr    xzr, xzr, xzr, 0
    __ stlxp(r0, zr, zr, sp);                          //       stlxp   w0, xzr, xzr, [sp]
    __ stlxpw(r2, zr, zr, r3);                         //       stlxp   w2, wzr, wzr, [x3]
    __ stxp(r4, zr, zr, r5);                           //       stxp    w4, xzr, xzr, [x5]
    __ stxpw(r6, zr, zr, sp);                          //       stxp    w6, wzr, wzr, [sp]
    __ dup(v0, __ T16B, zr);                           //       dup     v0.16b, wzr
    __ dup(v0, __ S, v1);                              //       dup     s0, v1.s[0]
    __ mov(v1, __ D, 0, zr);                           //       mov     v1.d[0], xzr
    __ mov(v1, __ S, 1, zr);                           //       mov     v1.s[1], wzr
    __ mov(v1, __ H, 2, zr);                           //       mov     v1.h[2], wzr
    __ mov(v1, __ B, 3, zr);                           //       mov     v1.b[3], wzr
    __ smov(r0, v1, __ S, 0);                          //       smov    x0, v1.s[0]
    __ smov(r0, v1, __ H, 1);                          //       smov    x0, v1.h[1]
    __ smov(r0, v1, __ B, 2);                          //       smov    x0, v1.b[2]
    __ umov(r0, v1, __ D, 0);                          //       umov    x0, v1.d[0]
    __ umov(r0, v1, __ S, 1);                          //       umov    w0, v1.s[1]
    __ umov(r0, v1, __ H, 2);                          //       umov    w0, v1.h[2]
    __ umov(r0, v1, __ B, 3);                          //       umov    w0, v1.b[3]
    __ fmovhid(r0, v1);                                //       fmov    x0, v1.d[1]
    __ fmovs(v9, __ T2S, 0.5f);                        //       fmov    v9.2s, 0.5
    __ fmovd(v14, __ T2D, 0.5f);                       //       fmov    v14.2d, 0.5
    __ ld1(v31, v0, __ T2D, Address(__ post(r1, r0))); //       ld1     {v31.2d, v0.2d}, [x1], x0
    __ fcvtzs(v0, __ T2S, v1);                         //       fcvtzs  v0.2s, v1.2s
    __ fcvtzs(v0, __ T4H, v1);                         //       fcvtzs  v0.4h, v1.4h
    __ fcvtzs(v0, __ T8H, v1);                         //       fcvtzs  v0.8h, v1.8h
    __ fcvtas(v2, __ T4S, v3);                         //       fcvtas  v2.4s, v3.4s
    __ fcvtas(v2, __ T4H, v3);                         //       fcvtas  v2.4h, v3.4h
    __ fcvtas(v2, __ T8H, v3);                         //       fcvtas  v2.8h, v3.8h
    __ fcvtms(v4, __ T2D, v5);                         //       fcvtms  v4.2d, v5.2d
    __ fcvtms(v4, __ T4H, v5);                         //       fcvtms  v4.4h, v5.4h
    __ fcvtms(v4, __ T8H, v5);                         //       fcvtms  v4.8h, v5.8h
    // SVE predicated copy/move: the trailing bool selects merging (/m, true)
    // vs zeroing (/z, false), per the expected disassembly below.
    __ sve_cpy(z0, __ S, p0, v1);                      //       mov     z0.s, p0/m, s1
    __ sve_cpy(z0, __ B, p0, 127, true);               //       mov     z0.b, p0/m, 127
    __ sve_cpy(z1, __ H, p0, -128, true);              //       mov     z1.h, p0/m, -128
    __ sve_cpy(z2, __ S, p0, 32512, true);             //       mov     z2.s, p0/m, 32512
    __ sve_cpy(z5, __ D, p0, -32768, false);           //       mov     z5.d, p0/z, -32768
    __ sve_cpy(z10, __ B, p0, -1, false);              //       mov     z10.b, p0/z, -1
    __ sve_cpy(z11, __ S, p0, -1, false);              //       mov     z11.s, p0/z, -1
    __ sve_inc(r0, __ S);                              //       incw    x0
    __ sve_dec(r1, __ H);                              //       dech    x1
    // Unpredicated and predicated SVE shifts by immediate, exercising the
    // maximum shift amount for each element size (7/15/31/63).
    __ sve_lsl(z0, __ B, z1, 7);                       //       lsl     z0.b, z1.b, #7
    __ sve_lsl(z21, __ H, z1, 15);                     //       lsl     z21.h, z1.h, #15
    __ sve_lsl(z0, __ S, z1, 31);                      //       lsl     z0.s, z1.s, #31
    __ sve_lsl(z0, __ D, z1, 63);                      //       lsl     z0.d, z1.d, #63
    __ sve_lsr(z0, __ B, z1, 7);                       //       lsr     z0.b, z1.b, #7
    __ sve_asr(z0, __ H, z11, 15);                     //       asr     z0.h, z11.h, #15
    __ sve_lsr(z30, __ S, z1, 31);                     //       lsr     z30.s, z1.s, #31
    __ sve_asr(z0, __ D, z1, 63);                      //       asr     z0.d, z1.d, #63
    __ sve_lsl(z0, __ B, p0, 0);                       //       lsl     z0.b, p0/m, z0.b, #0
    __ sve_lsl(z0, __ B, p0, 5);                       //       lsl     z0.b, p0/m, z0.b, #5
    __ sve_lsl(z1, __ H, p1, 15);                      //       lsl     z1.h, p1/m, z1.h, #15
    __ sve_lsl(z2, __ S, p2, 31);                      //       lsl     z2.s, p2/m, z2.s, #31
    __ sve_lsl(z3, __ D, p3, 63);                      //       lsl     z3.d, p3/m, z3.d, #63
    __ sve_lsr(z0, __ B, p0, 1);                       //       lsr     z0.b, p0/m, z0.b, #1
    __ sve_lsr(z0, __ B, p0, 8);                       //       lsr     z0.b, p0/m, z0.b, #8
    __ sve_lsr(z1, __ H, p1, 15);                      //       lsr     z1.h, p1/m, z1.h, #15
    __ sve_lsr(z2, __ S, p2, 7);                       //       lsr     z2.s, p2/m, z2.s, #7
    __ sve_lsr(z2, __ S, p2, 31);                      //       lsr     z2.s, p2/m, z2.s, #31
    __ sve_lsr(z3, __ D, p3, 63);                      //       lsr     z3.d, p3/m, z3.d, #63
    __ sve_asr(z0, __ B, p0, 1);                       //       asr     z0.b, p0/m, z0.b, #1
    __ sve_asr(z0, __ B, p0, 7);                       //       asr     z0.b, p0/m, z0.b, #7
    __ sve_asr(z1, __ H, p1, 5);                       //       asr     z1.h, p1/m, z1.h, #5
    __ sve_asr(z1, __ H, p1, 15);                      //       asr     z1.h, p1/m, z1.h, #15
    __ sve_asr(z2, __ S, p2, 31);                      //       asr     z2.s, p2/m, z2.s, #31
    __ sve_asr(z3, __ D, p3, 63);                      //       asr     z3.d, p3/m, z3.d, #63
    // addvl/addpl at their immediate extremes (#31, #-32), with sp operands.
    __ sve_addvl(sp, r0, 31);                          //       addvl   sp, x0, #31
    __ sve_addpl(r1, sp, -32);                         //       addpl   x1, sp, -32
    __ sve_cntp(r8, __ B, p0, p1);                     //       cntp    x8, p0, p1.b
    __ sve_dup(z0, __ B, 127);                         //       dup     z0.b, 127
    __ sve_dup(z1, __ H, -128);                        //       dup     z1.h, -128
    __ sve_dup(z2, __ S, 32512);                       //       dup     z2.s, 32512
    __ sve_dup(z7, __ D, -32768);                      //       dup     z7.d, -32768
    __ sve_dup(z10, __ B, -1);                         //       dup     z10.b, -1
    __ sve_dup(z11, __ S, -1);                         //       dup     z11.s, -1
    // SVE contiguous loads/stores: plain [sp], register offset, and
    // scaled-immediate (MUL VL) addressing forms.
    __ sve_ld1b(z0, __ B, p0, Address(sp));            //       ld1b    {z0.b}, p0/z, [sp]
    __ sve_ld1b(z0, __ H, p1, Address(sp));            //       ld1b    {z0.h}, p1/z, [sp]
    __ sve_ld1b(z0, __ S, p2, Address(sp, r8));        //       ld1b    {z0.s}, p2/z, [sp, x8]
    __ sve_ld1b(z0, __ D, p3, Address(sp, 7));         //       ld1b    {z0.d}, p3/z, [sp, #7, MUL VL]
    __ sve_ld1h(z10, __ H, p1, Address(sp, -8));       //       ld1h    {z10.h}, p1/z, [sp, #-8, MUL VL]
    __ sve_ld1w(z20, __ S, p2, Address(r0, 7));        //       ld1w    {z20.s}, p2/z, [x0, #7, MUL VL]
    __ sve_ld1b(z30, __ B, p3, Address(sp, r8));       //       ld1b    {z30.b}, p3/z, [sp, x8]
    __ sve_ld1w(z0, __ S, p4, Address(sp, r28));       //       ld1w    {z0.s}, p4/z, [sp, x28, LSL #2]
    __ sve_ld1d(z11, __ D, p5, Address(r0, r1));       //       ld1d    {z11.d}, p5/z, [x0, x1, LSL #3]
    __ sve_st1b(z22, __ B, p6, Address(sp));           //       st1b    {z22.b}, p6, [sp]
    __ sve_st1b(z31, __ B, p7, Address(sp, -8));       //       st1b    {z31.b}, p7, [sp, #-8, MUL VL]
    __ sve_st1b(z0, __ H, p1, Address(sp));            //       st1b    {z0.h}, p1, [sp]
    __ sve_st1b(z0, __ S, p2, Address(sp, r8));        //       st1b    {z0.s}, p2, [sp, x8]
    __ sve_st1b(z0, __ D, p3, Address(sp));            //       st1b    {z0.d}, p3, [sp]
    __ sve_st1w(z0, __ S, p1, Address(r0, 7));         //       st1w    {z0.s}, p1, [x0, #7, MUL VL]
    __ sve_st1b(z0, __ B, p2, Address(sp, r1));        //       st1b    {z0.b}, p2, [sp, x1]
    __ sve_st1h(z0, __ H, p3, Address(sp, r8));        //       st1h    {z0.h}, p3, [sp, x8, LSL #1]
    __ sve_st1d(z0, __ D, p4, Address(r0, r17));       //       st1d    {z0.d}, p4, [x0, x17, LSL #3]
    // ldr/str of whole Z registers at the immediate extremes (#-256, #255).
    __ sve_ldr(z0, Address(sp));                       //       ldr     z0, [sp]
    __ sve_ldr(z31, Address(sp, -256));                //       ldr     z31, [sp, #-256, MUL VL]
    __ sve_str(z8, Address(r8, 255));                  //       str     z8, [x8, #255, MUL VL]
    __ sve_cntb(r9);                                   //       cntb    x9
    __ sve_cnth(r10);                                  //       cnth    x10
    __ sve_cntw(r11);                                  //       cntw    x11
    __ sve_cntd(r12);                                  //       cntd    x12
    // brka/brkb: trailing bool selects merging (/m) vs zeroing (/z).
    __ sve_brka(p2, p0, p2, false);                    //       brka    p2.b, p0/z, p2.b
    __ sve_brka(p1, p2, p3, true);                     //       brka    p1.b, p2/m, p3.b
    __ sve_brkb(p1, p2, p3, false);                    //       brkb    p1.b, p2/z, p3.b
    __ sve_brkb(p2, p3, p4, true);                     //       brkb    p2.b, p3/m, p4.b
    __ sve_rev(p0, __ B, p1);                          //       rev     p0.b, p1.b
    __ sve_rev(p1, __ H, p2);                          //       rev     p1.h, p2.h
    __ sve_rev(p2, __ S, p3);                          //       rev     p2.s, p3.s
    __ sve_rev(p3, __ D, p4);                          //       rev     p3.d, p4.d
    __ sve_incp(r0, __ B, p2);                         //       incp    x0, p2.b
    // while* loop predicates in both 64-bit (x) and 32-bit (w) operand forms.
    __ sve_whilelt(p0, __ B, r1, r28);                 //       whilelt p0.b, x1, x28
    __ sve_whilele(p2, __ H, r11, r8);                 //       whilele p2.h, x11, x8
    __ sve_whilelo(p3, __ S, r7, r2);                  //       whilelo p3.s, x7, x2
    __ sve_whilels(p4, __ D, r17, r10);                //       whilels p4.d, x17, x10
    __ sve_whileltw(p1, __ B, r1, r28);                //       whilelt p1.b, w1, w28
    __ sve_whilelew(p2, __ H, r11, r8);                //       whilele p2.h, w11, w8
    __ sve_whilelow(p3, __ S, r7, r2);                 //       whilelo p3.s, w7, w2
    __ sve_whilelsw(p4, __ D, r17, r10);               //       whilels p4.d, w17, w10
    __ sve_sel(z0, __ B, p0, z1, z2);                  //       sel     z0.b, p0, z1.b, z2.b
    __ sve_sel(z4, __ D, p0, z5, z6);                  //       sel     z4.d, p0, z5.d, z6.d
    // SVE integer compares: vector-vector forms, then vector-immediate forms.
    __ sve_cmp(Assembler::EQ, p1, __ B, p0, z0, z1);   //       cmpeq   p1.b, p0/z, z0.b, z1.b
    __ sve_cmp(Assembler::NE, p1, __ H, p0, z2, z3);   //       cmpne   p1.h, p0/z, z2.h, z3.h
    __ sve_cmp(Assembler::GE, p1, __ S, p2, z4, z5);   //       cmpge   p1.s, p2/z, z4.s, z5.s
    __ sve_cmp(Assembler::GT, p1, __ D, p3, z6, z7);   //       cmpgt   p1.d, p3/z, z6.d, z7.d
    __ sve_cmp(Assembler::HI, p1, __ S, p2, z4, z5);   //       cmphi   p1.s, p2/z, z4.s, z5.s
    __ sve_cmp(Assembler::HS, p1, __ D, p3, z6, z7);   //       cmphs   p1.d, p3/z, z6.d, z7.d
    __ sve_cmp(Assembler::EQ, p1, __ B, p4, z0, 15);   //       cmpeq   p1.b, p4/z, z0.b, #15
    __ sve_cmp(Assembler::NE, p1, __ H, p0, z2, -16);  //       cmpne   p1.h, p0/z, z2.h, #-16
    __ sve_cmp(Assembler::LE, p1, __ S, p1, z4, 0);    //       cmple   p1.s, p1/z, z4.s, #0
    __ sve_cmp(Assembler::LT, p1, __ D, p2, z6, -1);   //       cmplt   p1.d, p2/z, z6.d, #-1
    __ sve_cmp(Assembler::GE, p1, __ S, p3, z4, 5);    //       cmpge   p1.s, p3/z, z4.s, #5
    __ sve_cmp(Assembler::GT, p1, __ B, p4, z6, -2);   //       cmpgt   p1.b, p4/z, z6.b, #-2
    __ sve_fcm(Assembler::EQ, p1, __ S, p0, z0, z1);   //       fcmeq   p1.s, p0/z, z0.s, z1.s
    __ sve_fcm(Assembler::NE, p1, __ D, p0, z2, z3);   //       fcmne   p1.d, p0/z, z2.d, z3.d
    __ sve_fcm(Assembler::GT, p1, __ S, p2, z4, z5);   //       fcmgt   p1.s, p2/z, z4.s, z5.s
    __ sve_fcm(Assembler::GE, p1, __ D, p3, z6, z7);   //       fcmge   p1.d, p3/z, z6.d, z7.d
    __ sve_uunpkhi(z0, __ H, z1);                      //       uunpkhi z0.h, z1.b
    __ sve_uunpklo(z4, __ S, z5);                      //       uunpklo z4.s, z5.h
    __ sve_sunpkhi(z6, __ D, z7);                      //       sunpkhi z6.d, z7.s
    __ sve_sunpklo(z10, __ H, z11);                    //       sunpklo z10.h, z11.b
    // int->FP and FP->FP/int conversions across all src/dst size pairings.
    __ sve_scvtf(z1, __ D, p0, z0, __ S);              //       scvtf   z1.d, p0/m, z0.s
    __ sve_scvtf(z3, __ D, p1, z2, __ D);              //       scvtf   z3.d, p1/m, z2.d
    __ sve_scvtf(z6, __ S, p2, z1, __ D);              //       scvtf   z6.s, p2/m, z1.d
    __ sve_scvtf(z6, __ S, p3, z1, __ S);              //       scvtf   z6.s, p3/m, z1.s
    __ sve_scvtf(z6, __ H, p3, z1, __ S);              //       scvtf   z6.h, p3/m, z1.s
    __ sve_scvtf(z6, __ H, p3, z1, __ D);              //       scvtf   z6.h, p3/m, z1.d
    __ sve_scvtf(z6, __ H, p3, z1, __ H);              //       scvtf   z6.h, p3/m, z1.h
    __ sve_fcvt(z5, __ D, p3, z4, __ S);               //       fcvt    z5.d, p3/m, z4.s
    __ sve_fcvt(z1, __ S, p3, z0, __ D);               //       fcvt    z1.s, p3/m, z0.d
    __ sve_fcvt(z5, __ S, p3, z4, __ H);               //       fcvt    z5.s, p3/m, z4.h
    __ sve_fcvt(z1, __ H, p3, z0, __ S);               //       fcvt    z1.h, p3/m, z0.s
    __ sve_fcvt(z5, __ D, p3, z4, __ H);               //       fcvt    z5.d, p3/m, z4.h
    __ sve_fcvt(z1, __ H, p3, z0, __ D);               //       fcvt    z1.h, p3/m, z0.d
    __ sve_fcvtzs(z19, __ D, p2, z1, __ D);            //       fcvtzs  z19.d, p2/m, z1.d
    __ sve_fcvtzs(z9, __ S, p1, z8, __ S);             //       fcvtzs  z9.s, p1/m, z8.s
    __ sve_fcvtzs(z1, __ S, p2, z0, __ D);             //       fcvtzs  z1.s, p2/m, z0.d
    __ sve_fcvtzs(z1, __ D, p3, z0, __ S);             //       fcvtzs  z1.d, p3/m, z0.s
    __ sve_fcvtzs(z1, __ S, p4, z18, __ H);            //       fcvtzs  z1.s, p4/m, z18.h
    // lasta/lastb to both general (w/x) and scalar FP/SIMD destinations.
    __ sve_lasta(r0, __ B, p0, z15);                   //       lasta   w0, p0, z15.b
    __ sve_lastb(r1, __ B, p1, z16);                   //       lastb   w1, p1, z16.b
    __ sve_lasta(v0, __ B, p0, z15);                   //       lasta   b0, p0, z15.b
    __ sve_lastb(v1, __ B, p1, z16);                   //       lastb   b1, p1, z16.b
    __ sve_index(z6, __ S, 1, 1);                      //       index   z6.s, #1, #1
    __ sve_index(z6, __ B, r5, 2);                     //       index   z6.b, w5, #2
    __ sve_index(z6, __ H, r5, 3);                     //       index   z6.h, w5, #3
    __ sve_index(z6, __ S, r5, 4);                     //       index   z6.s, w5, #4
    __ sve_index(z7, __ D, r5, 5);                     //       index   z7.d, x5, #5
    __ sve_cpy(z7, __ H, p3, r5);                      //       cpy     z7.h, p3/m, w5
    __ sve_tbl(z16, __ S, z17, z18);                   //       tbl     z16.s, {z17.s}, z18.s
    // Gather loads / scatter stores with scaled, unsigned-extended offsets.
    __ sve_ld1w_gather(z15, p0, r5, z16);              //       ld1w    {z15.s}, p0/z, [x5, z16.s, uxtw #2]
    __ sve_ld1d_gather(z15, p0, r5, z16);              //       ld1d    {z15.d}, p0/z, [x5, z16.d, uxtw #3]
    __ sve_st1w_scatter(z15, p0, r5, z16);             //       st1w    {z15.s}, p0, [x5, z16.s, uxtw #2]
    __ sve_st1d_scatter(z15, p0, r5, z16);             //       st1d    {z15.d}, p0, [x5, z16.d, uxtw #3]
    // Predicate-register logical ops and ptrue with explicit VL patterns.
    __ sve_and(p0, p1, p2, p3);                        //       and     p0.b, p1/z, p2.b, p3.b
    __ sve_ands(p4, p5, p6, p0);                       //       ands    p4.b, p5/z, p6.b, p0.b
    __ sve_eor(p0, p1, p2, p3);                        //       eor     p0.b, p1/z, p2.b, p3.b
    __ sve_eors(p5, p6, p0, p1);                       //       eors    p5.b, p6/z, p0.b, p1.b
    __ sve_orr(p0, p1, p2, p3);                        //       orr     p0.b, p1/z, p2.b, p3.b
    __ sve_orrs(p9, p1, p4, p5);                       //       orrs    p9.b, p1/z, p4.b, p5.b
    __ sve_bic(p10, p7, p9, p11);                      //       bic     p10.b, p7/z, p9.b, p11.b
    __ sve_ptest(p7, p1);                              //       ptest   p7, p1.b
    __ sve_ptrue(p1, __ B);                            //       ptrue   p1.b
    __ sve_ptrue(p1, __ B, 0b00001);                   //       ptrue   p1.b, vl1
    __ sve_ptrue(p1, __ B, 0b00101);                   //       ptrue   p1.b, vl5
    __ sve_ptrue(p1, __ B, 0b01001);                   //       ptrue   p1.b, vl16
    __ sve_ptrue(p1, __ B, 0b01101);                   //       ptrue   p1.b, vl256
    __ sve_ptrue(p2, __ H);                            //       ptrue   p2.h
    __ sve_ptrue(p2, __ H, 0b00010);                   //       ptrue   p2.h, vl2
    __ sve_ptrue(p2, __ H, 0b00110);                   //       ptrue   p2.h, vl6
    __ sve_ptrue(p2, __ H, 0b01010);                   //       ptrue   p2.h, vl32
    __ sve_ptrue(p3, __ S);                            //       ptrue   p3.s
    __ sve_ptrue(p3, __ S, 0b00011);                   //       ptrue   p3.s, vl3
    __ sve_ptrue(p3, __ S, 0b00111);                   //       ptrue   p3.s, vl7
    __ sve_ptrue(p3, __ S, 0b01011);                   //       ptrue   p3.s, vl64
    __ sve_ptrue(p4, __ D);                            //       ptrue   p4.d
    __ sve_ptrue(p4, __ D, 0b00100);                   //       ptrue   p4.d, vl4
    __ sve_ptrue(p4, __ D, 0b01000);                   //       ptrue   p4.d, vl8
    __ sve_ptrue(p4, __ D, 0b01100);                   //       ptrue   p4.d, vl128
    __ sve_pfalse(p7);                                 //       pfalse  p7.b
    __ sve_uzp1(p0, __ B, p0, p1);                     //       uzp1    p0.b, p0.b, p1.b
    __ sve_uzp1(p0, __ H, p0, p1);                     //       uzp1    p0.h, p0.h, p1.h
    __ sve_uzp1(p0, __ S, p0, p1);                     //       uzp1    p0.s, p0.s, p1.s
    __ sve_uzp1(p0, __ D, p0, p1);                     //       uzp1    p0.d, p0.d, p1.d
    __ sve_uzp2(p0, __ B, p0, p1);                     //       uzp2    p0.b, p0.b, p1.b
    __ sve_uzp2(p0, __ H, p0, p1);                     //       uzp2    p0.h, p0.h, p1.h
    __ sve_uzp2(p0, __ S, p0, p1);                     //       uzp2    p0.s, p0.s, p1.s
    __ sve_uzp2(p0, __ D, p0, p1);                     //       uzp2    p0.d, p0.d, p1.d
    __ sve_punpklo(p1, p0);                            //       punpklo p1.h, p0.b
    __ sve_punpkhi(p1, p0);                            //       punpkhi p1.h, p0.b
    __ sve_compact(z16, __ S, z16, p1);                //       compact z16.s, p1, z16.s
    __ sve_compact(z16, __ D, z16, p1);                //       compact z16.d, p1, z16.d
    __ sve_ext(z17, z16, 63);                          //       ext     z17.b, z17.b, z16.b, #63
    __ sve_fac(Assembler::GT, p1, __ H, p2, z4, z5);   //       facgt   p1.h, p2/z, z4.h, z5.h
    __ sve_fac(Assembler::GT, p1, __ S, p2, z4, z5);   //       facgt   p1.s, p2/z, z4.s, z5.s
    __ sve_fac(Assembler::GT, p1, __ D, p2, z4, z5);   //       facgt   p1.d, p2/z, z4.d, z5.d
    __ sve_fac(Assembler::GE, p1, __ H, p2, z4, z5);   //       facge   p1.h, p2/z, z4.h, z5.h
    __ sve_fac(Assembler::GE, p1, __ S, p2, z4, z5);   //       facge   p1.s, p2/z, z4.s, z5.s
    __ sve_fac(Assembler::GE, p1, __ D, p2, z4, z5);   //       facge   p1.d, p2/z, z4.d, z5.d
    __ sve_histcnt(z16, __ S, p0, z16, z16);           //       histcnt z16.s, p0/z, z16.s, z16.s
    __ sve_histcnt(z17, __ D, p0, z17, z17);           //       histcnt z17.d, p0/z, z17.d, z17.d
1154 
1155 // FloatImmediateOp
1156     __ fmovd(v0, 2.0);                                 //       fmov d0, #2.0
1157     __ fmovd(v0, 2.125);                               //       fmov d0, #2.125
1158     __ fmovd(v0, 4.0);                                 //       fmov d0, #4.0
1159     __ fmovd(v0, 4.25);                                //       fmov d0, #4.25
1160     __ fmovd(v0, 8.0);                                 //       fmov d0, #8.0
1161     __ fmovd(v0, 8.5);                                 //       fmov d0, #8.5
1162     __ fmovd(v0, 16.0);                                //       fmov d0, #16.0
1163     __ fmovd(v0, 17.0);                                //       fmov d0, #17.0
1164     __ fmovd(v0, 0.125);                               //       fmov d0, #0.125
1165     __ fmovd(v0, 0.1328125);                           //       fmov d0, #0.1328125
1166     __ fmovd(v0, 0.25);                                //       fmov d0, #0.25
1167     __ fmovd(v0, 0.265625);                            //       fmov d0, #0.265625
1168     __ fmovd(v0, 0.5);                                 //       fmov d0, #0.5
1169     __ fmovd(v0, 0.53125);                             //       fmov d0, #0.53125
1170     __ fmovd(v0, 1.0);                                 //       fmov d0, #1.0
1171     __ fmovd(v0, 1.0625);                              //       fmov d0, #1.0625
1172     __ fmovd(v0, -2.0);                                //       fmov d0, #-2.0
1173     __ fmovd(v0, -2.125);                              //       fmov d0, #-2.125
1174     __ fmovd(v0, -4.0);                                //       fmov d0, #-4.0
1175     __ fmovd(v0, -4.25);                               //       fmov d0, #-4.25
1176     __ fmovd(v0, -8.0);                                //       fmov d0, #-8.0
1177     __ fmovd(v0, -8.5);                                //       fmov d0, #-8.5
1178     __ fmovd(v0, -16.0);                               //       fmov d0, #-16.0
1179     __ fmovd(v0, -17.0);                               //       fmov d0, #-17.0
1180     __ fmovd(v0, -0.125);                              //       fmov d0, #-0.125
1181     __ fmovd(v0, -0.1328125);                          //       fmov d0, #-0.1328125
1182     __ fmovd(v0, -0.25);                               //       fmov d0, #-0.25
1183     __ fmovd(v0, -0.265625);                           //       fmov d0, #-0.265625
1184     __ fmovd(v0, -0.5);                                //       fmov d0, #-0.5
1185     __ fmovd(v0, -0.53125);                            //       fmov d0, #-0.53125
1186     __ fmovd(v0, -1.0);                                //       fmov d0, #-1.0
1187     __ fmovd(v0, -1.0625);                             //       fmov d0, #-1.0625
1188 
1189 // LSEOp
1190     __ swp(Assembler::xword, r6, r7, r19);             //       swp     x6, x7, [x19]
1191     __ ldadd(Assembler::xword, r13, r28, r17);         //       ldadd   x13, x28, [x17]
1192     __ ldbic(Assembler::xword, r16, r6, r2);           //       ldclr   x16, x6, [x2]
1193     __ ldeor(Assembler::xword, r29, r3, r4);           //       ldeor   x29, x3, [x4]
1194     __ ldorr(Assembler::xword, r6, r16, r20);          //       ldset   x6, x16, [x20]
1195     __ ldsmin(Assembler::xword, r13, r12, r20);        //       ldsmin  x13, x12, [x20]
1196     __ ldsmax(Assembler::xword, r8, r25, r20);         //       ldsmax  x8, x25, [x20]
1197     __ ldumin(Assembler::xword, r19, r0, r11);         //       ldumin  x19, x0, [x11]
1198     __ ldumax(Assembler::xword, r24, r6, r20);         //       ldumax  x24, x6, [x20]
1199 
1200 // LSEOp
1201     __ swpa(Assembler::xword, zr, r14, r16);           //       swpa    xzr, x14, [x16]
1202     __ ldadda(Assembler::xword, r6, r0, r7);           //       ldadda  x6, x0, [x7]
1203     __ ldbica(Assembler::xword, r15, r19, r26);        //       ldclra  x15, x19, [x26]
1204     __ ldeora(Assembler::xword, r9, r10, r23);         //       ldeora  x9, x10, [x23]
1205     __ ldorra(Assembler::xword, r21, r22, r28);        //       ldseta  x21, x22, [x28]
1206     __ ldsmina(Assembler::xword, r2, r3, r15);         //       ldsmina x2, x3, [x15]
1207     __ ldsmaxa(Assembler::xword, r19, r20, r7);        //       ldsmaxa x19, x20, [x7]
1208     __ ldumina(Assembler::xword, r4, r29, r7);         //       ldumina x4, x29, [x7]
1209     __ ldumaxa(Assembler::xword, r0, r9, r16);         //       ldumaxa x0, x9, [x16]
1210 
1211 // LSEOp
1212     __ swpal(Assembler::xword, r20, r23, r4);          //       swpal   x20, x23, [x4]
1213     __ ldaddal(Assembler::xword, r16, r10, r23);       //       ldaddal x16, x10, [x23]
1214     __ ldbical(Assembler::xword, r11, r25, r6);        //       ldclral x11, x25, [x6]
1215     __ ldeoral(Assembler::xword, zr, r16, r13);        //       ldeoral xzr, x16, [x13]
1216     __ ldorral(Assembler::xword, r23, r12, r1);        //       ldsetal x23, x12, [x1]
1217     __ ldsminal(Assembler::xword, r14, r9, r21);       //       ldsminal        x14, x9, [x21]
1218     __ ldsmaxal(Assembler::xword, r16, r26, r15);      //       ldsmaxal        x16, x26, [x15]
1219     __ lduminal(Assembler::xword, r4, r4, r15);        //       lduminal        x4, x4, [x15]
1220     __ ldumaxal(Assembler::xword, r8, r6, r30);        //       ldumaxal        x8, x6, [x30]
1221 
1222 // LSEOp
1223     __ swpl(Assembler::xword, r4, r29, r17);           //       swpl    x4, x29, [x17]
1224     __ ldaddl(Assembler::xword, r29, r26, r9);         //       ldaddl  x29, x26, [x9]
1225     __ ldbicl(Assembler::xword, r15, r2, r11);         //       ldclrl  x15, x2, [x11]
1226     __ ldeorl(Assembler::xword, r29, r3, r7);          //       ldeorl  x29, x3, [x7]
1227     __ ldorrl(Assembler::xword, r1, r27, r21);         //       ldsetl  x1, x27, [x21]
1228     __ ldsminl(Assembler::xword, r16, r14, r8);        //       ldsminl x16, x14, [x8]
1229     __ ldsmaxl(Assembler::xword, r16, r22, r25);       //       ldsmaxl x16, x22, [x25]
1230     __ lduminl(Assembler::xword, r5, r20, r21);        //       lduminl x5, x20, [x21]
1231     __ ldumaxl(Assembler::xword, r16, r23, r16);       //       ldumaxl x16, x23, [x16]
1232 
1233 // LSEOp
1234     __ swp(Assembler::word, r30, r20, r20);            //       swp     w30, w20, [x20]
1235     __ ldadd(Assembler::word, r0, r4, r19);            //       ldadd   w0, w4, [x19]
1236     __ ldbic(Assembler::word, r24, r4, r20);           //       ldclr   w24, w4, [x20]
1237     __ ldeor(Assembler::word, r4, r24, r26);           //       ldeor   w4, w24, [x26]
1238     __ ldorr(Assembler::word, r19, r2, r8);            //       ldset   w19, w2, [x8]
1239     __ ldsmin(Assembler::word, r8, r14, r24);          //       ldsmin  w8, w14, [x24]
1240     __ ldsmax(Assembler::word, r16, zr, r22);          //       ldsmax  w16, wzr, [x22]
1241     __ ldumin(Assembler::word, r4, zr, r1);            //       ldumin  w4, wzr, [x1]
1242     __ ldumax(Assembler::word, r10, r20, r12);         //       ldumax  w10, w20, [x12]
1243 
1244 // LSEOp
1245     __ swpa(Assembler::word, r0, r9, r7);              //       swpa    w0, w9, [x7]
1246     __ ldadda(Assembler::word, r24, r16, r4);          //       ldadda  w24, w16, [x4]
1247     __ ldbica(Assembler::word, r27, r6, r10);          //       ldclra  w27, w6, [x10]
1248     __ ldeora(Assembler::word, r27, r24, r13);         //       ldeora  w27, w24, [x13]
1249     __ ldorra(Assembler::word, r16, zr, r22);          //       ldseta  w16, wzr, [x22]
1250     __ ldsmina(Assembler::word, r22, r20, sp);         //       ldsmina w22, w20, [sp]
1251     __ ldsmaxa(Assembler::word, r29, r9, r14);         //       ldsmaxa w29, w9, [x14]
1252     __ ldumina(Assembler::word, r20, r7, r20);         //       ldumina w20, w7, [x20]
1253     __ ldumaxa(Assembler::word, r28, r9, r11);         //       ldumaxa w28, w9, [x11]
1254 
1255 // LSEOp
1256     __ swpal(Assembler::word, r14, r12, r20);          //       swpal   w14, w12, [x20]
1257     __ ldaddal(Assembler::word, r1, r24, r9);          //       ldaddal w1, w24, [x9]
1258     __ ldbical(Assembler::word, r19, r13, r19);        //       ldclral w19, w13, [x19]
1259     __ ldeoral(Assembler::word, r16, r16, r5);         //       ldeoral w16, w16, [x5]
1260     __ ldorral(Assembler::word, r0, r3, r12);          //       ldsetal w0, w3, [x12]
1261     __ ldsminal(Assembler::word, r8, r15, r15);        //       ldsminal        w8, w15, [x15]
1262     __ ldsmaxal(Assembler::word, r16, r4, r15);        //       ldsmaxal        w16, w4, [x15]
1263     __ lduminal(Assembler::word, r30, r5, r0);         //       lduminal        w30, w5, [x0]
1264     __ ldumaxal(Assembler::word, r10, r22, r27);       //       ldumaxal        w10, w22, [x27]
1265 
1266 // LSEOp
1267     __ swpl(Assembler::word, r3, r0, r9);              //       swpl    w3, w0, [x9]
1268     __ ldaddl(Assembler::word, r19, r29, r10);         //       ldaddl  w19, w29, [x10]
1269     __ ldbicl(Assembler::word, r24, r4, r20);          //       ldclrl  w24, w4, [x20]
1270     __ ldeorl(Assembler::word, r7, r24, r29);          //       ldeorl  w7, w24, [x29]
1271     __ ldorrl(Assembler::word, r14, r21, r11);         //       ldsetl  w14, w21, [x11]
1272     __ ldsminl(Assembler::word, r27, r13, r15);        //       ldsminl w27, w13, [x15]
1273     __ ldsmaxl(Assembler::word, zr, r17, r14);         //       ldsmaxl wzr, w17, [x14]
1274     __ lduminl(Assembler::word, r3, r30, r16);         //       lduminl w3, w30, [x16]
1275     __ ldumaxl(Assembler::word, r22, r20, r7);         //       ldumaxl w22, w20, [x7]
1276 
1277 // SHA3SIMDOp
1278     __ bcax(v20, __ T16B, v3, v1, v26);                //       bcax            v20.16B, v3.16B, v1.16B, v26.16B
1279     __ eor3(v19, __ T16B, v9, v16, v17);               //       eor3            v19.16B, v9.16B, v16.16B, v17.16B
1280     __ rax1(v21, __ T2D, v0, v4);                      //       rax1            v21.2D, v0.2D, v4.2D
1281     __ xar(v2, __ T2D, v24, v14, 12);                  //       xar             v2.2D, v24.2D, v14.2D, #12
1282 
1283 // SHA512SIMDOp
1284     __ sha512h(v11, __ T2D, v21, v14);                 //       sha512h         q11, q21, v14.2D
1285     __ sha512h2(v17, __ T2D, v30, v12);                //       sha512h2                q17, q30, v12.2D
1286     __ sha512su0(v3, __ T2D, v3);                      //       sha512su0               v3.2D, v3.2D
1287     __ sha512su1(v23, __ T2D, v9, v3);                 //       sha512su1               v23.2D, v9.2D, v3.2D
1288 
1289 // SVEBinaryImmOp
1290     __ sve_add(z24, __ D, 26u);                        //       add     z24.d, z24.d, #0x1a
1291     __ sve_sub(z19, __ S, 62u);                        //       sub     z19.s, z19.s, #0x3e
1292     __ sve_and(z26, __ S, 917504u);                    //       and     z26.s, z26.s, #0xe0000
1293     __ sve_eor(z8, __ D, 18442240474082197503u);       //       eor     z8.d, z8.d, #0xfff0000000003fff
1294     __ sve_orr(z18, __ S, 253952u);                    //       orr     z18.s, z18.s, #0x3e000
1295 
1296 // SVEBinaryImmOp
1297     __ sve_add(z9, __ S, 97u);                         //       add     z9.s, z9.s, #0x61
1298     __ sve_sub(z8, __ H, 118u);                        //       sub     z8.h, z8.h, #0x76
1299     __ sve_and(z19, __ S, 1056980736u);                //       and     z19.s, z19.s, #0x3f003f00
1300     __ sve_eor(z25, __ S, 3758350339u);                //       eor     z25.s, z25.s, #0xe003e003
1301     __ sve_orr(z9, __ S, 4294459391u);                 //       orr     z9.s, z9.s, #0xfff83fff
1302 
1303 // SVEBinaryImmOp
1304     __ sve_add(z23, __ D, 183u);                       //       add     z23.d, z23.d, #0xb7
1305     __ sve_sub(z8, __ H, 41u);                         //       sub     z8.h, z8.h, #0x29
1306     __ sve_and(z28, __ D, 8064u);                      //       and     z28.d, z28.d, #0x1f80
1307     __ sve_eor(z15, __ D, 18428729675200069887u);      //       eor     z15.d, z15.d, #0xffc00000000000ff
1308     __ sve_orr(z0, __ B, 239u);                        //       orr     z0.b, z0.b, #0xef
1309 
1310 // SVEBinaryImmOp
1311     __ sve_add(z5, __ D, 243u);                        //       add     z5.d, z5.d, #0xf3
1312     __ sve_sub(z19, __ S, 8u);                         //       sub     z19.s, z19.s, #0x8
1313     __ sve_and(z13, __ H, 32256u);                     //       and     z13.h, z13.h, #0x7e00
1314     __ sve_eor(z0, __ S, 4294967293u);                 //       eor     z0.s, z0.s, #0xfffffffd
1315     __ sve_orr(z21, __ S, 4294965263u);                //       orr     z21.s, z21.s, #0xfffff80f
1316 
1317 // SVEBinaryImmOp
1318     __ sve_add(z12, __ H, 20u);                        //       add     z12.h, z12.h, #0x14
1319     __ sve_sub(z0, __ H, 190u);                        //       sub     z0.h, z0.h, #0xbe
1320     __ sve_and(z23, __ B, 239u);                       //       and     z23.b, z23.b, #0xef
1321     __ sve_eor(z27, __ D, 18442240474082197503u);      //       eor     z27.d, z27.d, #0xfff0000000003fff
1322     __ sve_orr(z22, __ B, 124u);                       //       orr     z22.b, z22.b, #0x7c
1323 
1324 // SVEBinaryImmOp
1325     __ sve_add(z20, __ H, 165u);                       //       add     z20.h, z20.h, #0xa5
1326     __ sve_sub(z24, __ D, 72u);                        //       sub     z24.d, z24.d, #0x48
1327     __ sve_and(z31, __ S, 4026535935u);                //       and     z31.s, z31.s, #0xf0000fff
1328     __ sve_eor(z21, __ B, 128u);                       //       eor     z21.b, z21.b, #0x80
1329     __ sve_orr(z30, __ S, 4294967293u);                //       orr     z30.s, z30.s, #0xfffffffd
1330 
1331 // SVEVectorOp
1332     __ sve_add(z26, __ H, z18, z19);                   //       add     z26.h, z18.h, z19.h
1333     __ sve_sub(z11, __ S, z13, z29);                   //       sub     z11.s, z13.s, z29.s
1334     __ sve_fadd(z5, __ S, z1, z14);                    //       fadd    z5.s, z1.s, z14.s
1335     __ sve_fmul(z2, __ S, z7, z10);                    //       fmul    z2.s, z7.s, z10.s
1336     __ sve_fsub(z19, __ S, z4, z26);                   //       fsub    z19.s, z4.s, z26.s
1337     __ sve_sqadd(z2, __ B, z3, z30);                   //       sqadd   z2.b, z3.b, z30.b
1338     __ sve_sqsub(z20, __ D, z5, z20);                  //       sqsub   z20.d, z5.d, z20.d
1339     __ sve_uqadd(z29, __ H, z13, z13);                 //       uqadd   z29.h, z13.h, z13.h
1340     __ sve_uqsub(z14, __ H, z30, z1);                  //       uqsub   z14.h, z30.h, z1.h
1341     __ sve_abs(z28, __ D, p0, z3);                     //       abs     z28.d, p0/m, z3.d
1342     __ sve_add(z9, __ B, p6, z9);                      //       add     z9.b, p6/m, z9.b, z9.b
1343     __ sve_and(z26, __ B, p2, z14);                    //       and     z26.b, p2/m, z26.b, z14.b
1344     __ sve_asr(z20, __ D, p6, z7);                     //       asr     z20.d, p6/m, z20.d, z7.d
1345     __ sve_bic(z20, __ D, p4, z6);                     //       bic     z20.d, p4/m, z20.d, z6.d
1346     __ sve_clz(z13, __ H, p0, z29);                    //       clz     z13.h, p0/m, z29.h
1347     __ sve_cnt(z9, __ B, p0, z1);                      //       cnt     z9.b, p0/m, z1.b
1348     __ sve_eor(z27, __ B, p6, z15);                    //       eor     z27.b, p6/m, z27.b, z15.b
1349     __ sve_lsl(z4, __ D, p7, z17);                     //       lsl     z4.d, p7/m, z4.d, z17.d
1350     __ sve_lsr(z2, __ B, p0, z24);                     //       lsr     z2.b, p0/m, z2.b, z24.b
1351     __ sve_mul(z26, __ B, p7, z13);                    //       mul     z26.b, p7/m, z26.b, z13.b
1352     __ sve_neg(z22, __ D, p3, z16);                    //       neg     z22.d, p3/m, z16.d
1353     __ sve_not(z17, __ D, p1, z11);                    //       not     z17.d, p1/m, z11.d
1354     __ sve_orr(z16, __ B, p0, z16);                    //       orr     z16.b, p0/m, z16.b, z16.b
1355     __ sve_rbit(z28, __ D, p1, z23);                   //       rbit    z28.d, p1/m, z23.d
1356     __ sve_revb(z28, __ D, p4, z10);                   //       revb    z28.d, p4/m, z10.d
1357     __ sve_smax(z17, __ S, p7, z7);                    //       smax    z17.s, p7/m, z17.s, z7.s
1358     __ sve_smin(z4, __ H, p3, z24);                    //       smin    z4.h, p3/m, z4.h, z24.h
1359     __ sve_umax(z9, __ B, p2, z11);                    //       umax    z9.b, p2/m, z9.b, z11.b
1360     __ sve_umin(z4, __ S, p5, z22);                    //       umin    z4.s, p5/m, z4.s, z22.s
1361     __ sve_sub(z4, __ H, p0, z15);                     //       sub     z4.h, p0/m, z4.h, z15.h
1362     __ sve_fabs(z4, __ D, p7, z26);                    //       fabs    z4.d, p7/m, z26.d
1363     __ sve_fadd(z5, __ S, p5, z26);                    //       fadd    z5.s, p5/m, z5.s, z26.s
1364     __ sve_fdiv(z31, __ S, p0, z25);                   //       fdiv    z31.s, p0/m, z31.s, z25.s
1365     __ sve_fmax(z8, __ D, p1, z3);                     //       fmax    z8.d, p1/m, z8.d, z3.d
1366     __ sve_fmin(z7, __ D, p6, z24);                    //       fmin    z7.d, p6/m, z7.d, z24.d
1367     __ sve_fmul(z24, __ S, p7, z17);                   //       fmul    z24.s, p7/m, z24.s, z17.s
1368     __ sve_fneg(z10, __ S, p3, z30);                   //       fneg    z10.s, p3/m, z30.s
1369     __ sve_frintm(z8, __ S, p6, z29);                  //       frintm  z8.s, p6/m, z29.s
1370     __ sve_frintn(z31, __ D, p5, z31);                 //       frintn  z31.d, p5/m, z31.d
1371     __ sve_frintp(z0, __ D, p5, z7);                   //       frintp  z0.d, p5/m, z7.d
1372     __ sve_fsqrt(z29, __ S, p6, z22);                  //       fsqrt   z29.s, p6/m, z22.s
1373     __ sve_fsub(z29, __ S, p6, z20);                   //       fsub    z29.s, p6/m, z29.s, z20.s
1374     __ sve_fmad(z6, __ D, p4, z18, z13);               //       fmad    z6.d, p4/m, z18.d, z13.d
1375     __ sve_fmla(z21, __ S, p2, z0, z19);               //       fmla    z21.s, p2/m, z0.s, z19.s
1376     __ sve_fmls(z28, __ D, p1, z17, z6);               //       fmls    z28.d, p1/m, z17.d, z6.d
1377     __ sve_fmsb(z20, __ D, p6, z28, z14);              //       fmsb    z20.d, p6/m, z28.d, z14.d
1378     __ sve_fnmad(z14, __ S, p4, z10, z26);             //       fnmad   z14.s, p4/m, z10.s, z26.s
1379     __ sve_fnmsb(z24, __ D, p0, z11, z15);             //       fnmsb   z24.d, p0/m, z11.d, z15.d
1380     __ sve_fnmla(z23, __ D, p5, z20, z28);             //       fnmla   z23.d, p5/m, z20.d, z28.d
1381     __ sve_fnmls(z20, __ D, p7, z24, z0);              //       fnmls   z20.d, p7/m, z24.d, z0.d
1382     __ sve_mla(z6, __ B, p5, z13, z12);                //       mla     z6.b, p5/m, z13.b, z12.b
1383     __ sve_mls(z13, __ S, p7, z26, z23);               //       mls     z13.s, p7/m, z26.s, z23.s
1384     __ sve_and(z6, z2, z29);                           //       and     z6.d, z2.d, z29.d
1385     __ sve_eor(z0, z29, z23);                          //       eor     z0.d, z29.d, z23.d
1386     __ sve_orr(z4, z5, z8);                            //       orr     z4.d, z5.d, z8.d
1387     __ sve_bic(z13, z17, z13);                         //       bic     z13.d, z17.d, z13.d
1388     __ sve_uzp1(z8, __ H, z10, z8);                    //       uzp1    z8.h, z10.h, z8.h
1389     __ sve_uzp2(z19, __ S, z0, z29);                   //       uzp2    z19.s, z0.s, z29.s
1390     __ sve_fabd(z16, __ D, p3, z23);                   //       fabd    z16.d, p3/m, z16.d, z23.d
1391     __ sve_bext(z23, __ B, z30, z13);                  //       bext    z23.b, z30.b, z13.b
1392     __ sve_bdep(z25, __ H, z22, z0);                   //       bdep    z25.h, z22.h, z0.h
1393     __ sve_eor3(z25, z30, z11);                        //       eor3    z25.d, z25.d, z30.d, z11.d
1394     __ sve_sqadd(z14, __ H, p5, z22);                  //       sqadd   z14.h, p5/m, z14.h, z22.h
1395     __ sve_sqsub(z5, __ H, p4, z0);                    //       sqsub   z5.h, p4/m, z5.h, z0.h
1396     __ sve_uqadd(z9, __ D, p0, z3);                    //       uqadd   z9.d, p0/m, z9.d, z3.d
1397     __ sve_uqsub(z14, __ H, p1, z29);                  //       uqsub   z14.h, p1/m, z14.h, z29.h
1398 
1399 // SVEReductionOp
1400     __ sve_andv(v14, __ D, p5, z4);                    //       andv d14, p5, z4.d
1401     __ sve_orv(v27, __ S, p3, z22);                    //       orv s27, p3, z22.s
1402     __ sve_eorv(v31, __ S, p6, z11);                   //       eorv s31, p6, z11.s
1403     __ sve_smaxv(v12, __ B, p4, z28);                  //       smaxv b12, p4, z28.b
1404     __ sve_sminv(v28, __ D, p4, z4);                   //       sminv d28, p4, z4.d
1405     __ sve_fminv(v6, __ D, p0, z15);                   //       fminv d6, p0, z15.d
1406     __ sve_fmaxv(v1, __ D, p5, z18);                   //       fmaxv d1, p5, z18.d
1407     __ sve_fadda(v2, __ S, p2, z4);                    //       fadda s2, p2, s2, z4.s
1408     __ sve_uaddv(v11, __ S, p2, z28);                  //       uaddv d11, p2, z28.s
1409 
1410 // AddWideNEONOp
1411     __ saddwv(v3, v4, __ T8H, v5, __ T8B);             //       saddw   v3.8H, v4.8H, v5.8B
1412     __ saddwv2(v21, v22, __ T8H, v23, __ T16B);        //       saddw2  v21.8H, v22.8H, v23.16B
1413     __ saddwv(v31, v0, __ T4S, v1, __ T4H);            //       saddw   v31.4S, v0.4S, v1.4H
1414     __ saddwv2(v11, v12, __ T4S, v13, __ T8H);         //       saddw2  v11.4S, v12.4S, v13.8H
1415     __ saddwv(v24, v25, __ T2D, v26, __ T2S);          //       saddw   v24.2D, v25.2D, v26.2S
1416     __ saddwv2(v21, v22, __ T2D, v23, __ T4S);         //       saddw2  v21.2D, v22.2D, v23.4S
1417     __ uaddwv(v15, v16, __ T8H, v17, __ T8B);          //       uaddw   v15.8H, v16.8H, v17.8B
1418     __ uaddwv2(v12, v13, __ T8H, v14, __ T16B);        //       uaddw2  v12.8H, v13.8H, v14.16B
1419     __ uaddwv(v6, v7, __ T4S, v8, __ T4H);             //       uaddw   v6.4S, v7.4S, v8.4H
1420     __ uaddwv2(v13, v14, __ T4S, v15, __ T8H);         //       uaddw2  v13.4S, v14.4S, v15.8H
1421     __ uaddwv(v8, v9, __ T2D, v10, __ T2S);            //       uaddw   v8.2D, v9.2D, v10.2S
1422     __ uaddwv2(v15, v16, __ T2D, v17, __ T4S);         //       uaddw2  v15.2D, v16.2D, v17.4S
1423 
1424     __ bind(forth);
1425 
1426 /*
1427 */
1428 
1429   static const unsigned int insns[] =
1430   {
1431     0x8b0d82fa,     0xcb49970c,     0xab889dfc,     0xeb9ee787,
1432     0x0b9b3ec9,     0x4b9179a3,     0x2b88474e,     0x6b8c56c0,
1433     0x8a1a51e0,     0xaa11f4ba,     0xca0281b8,     0xea918c7c,
1434     0x0a5d4a19,     0x2a4b262d,     0x4a513ca5,     0x6a9b6ae2,
1435     0x8a70b79b,     0xaaba9728,     0xca6dfe3d,     0xea627f1c,
1436     0x0aa70f53,     0x2aaa0f06,     0x4a6176a4,     0x6a604eb0,
1437     0x1105ed91,     0x3100583e,     0x5101f8bd,     0x710f0306,
1438     0x9101a1a0,     0xb10a5cc8,     0xd10810aa,     0xf10fd061,
1439     0x120cb166,     0x321764bc,     0x52174681,     0x720c0227,
1440     0x9241018e,     0xb25a2969,     0xd278b411,     0xf26aad01,
1441     0x14000000,     0x17ffffd7,     0x140004b0,     0x94000000,
1442     0x97ffffd4,     0x940004ad,     0x3400000a,     0x34fffa2a,
1443     0x3400954a,     0x35000008,     0x35fff9c8,     0x350094e8,
1444     0xb400000b,     0xb4fff96b,     0xb400948b,     0xb500001d,
1445     0xb5fff91d,     0xb500943d,     0x10000013,     0x10fff8b3,
1446     0x100093d3,     0x90000013,     0x36300016,     0x3637f836,
1447     0x36309356,     0x3758000c,     0x375ff7cc,     0x375892ec,
1448     0x128313a0,     0x528a32c7,     0x7289173b,     0x92ab3acc,
1449     0xd2a0bf94,     0xf2c285e8,     0x9358722f,     0x330e652f,
1450     0x53067f3b,     0x93577c53,     0xb34a1aac,     0xd35a4016,
1451     0x13946c63,     0x93c3dbc8,     0x54000000,     0x54fff5a0,
1452     0x540090c0,     0x54000001,     0x54fff541,     0x54009061,
1453     0x54000002,     0x54fff4e2,     0x54009002,     0x54000002,
1454     0x54fff482,     0x54008fa2,     0x54000003,     0x54fff423,
1455     0x54008f43,     0x54000003,     0x54fff3c3,     0x54008ee3,
1456     0x54000004,     0x54fff364,     0x54008e84,     0x54000005,
1457     0x54fff305,     0x54008e25,     0x54000006,     0x54fff2a6,
1458     0x54008dc6,     0x54000007,     0x54fff247,     0x54008d67,
1459     0x54000008,     0x54fff1e8,     0x54008d08,     0x54000009,
1460     0x54fff189,     0x54008ca9,     0x5400000a,     0x54fff12a,
1461     0x54008c4a,     0x5400000b,     0x54fff0cb,     0x54008beb,
1462     0x5400000c,     0x54fff06c,     0x54008b8c,     0x5400000d,
1463     0x54fff00d,     0x54008b2d,     0x5400000e,     0x54ffefae,
1464     0x54008ace,     0x5400000f,     0x54ffef4f,     0x54008a6f,
1465     0xd40658e1,     0xd4014d22,     0xd4046543,     0xd4273f60,
1466     0xd44cad80,     0xd503201f,     0xd503203f,     0xd503205f,
1467     0xd503209f,     0xd50320bf,     0xd503219f,     0xd50323bf,
1468     0xd503239f,     0xd50321df,     0xd50323ff,     0xd50323df,
1469     0xd503211f,     0xd503233f,     0xd503231f,     0xd503215f,
1470     0xd503237f,     0xd503235f,     0xd69f03e0,     0xd6bf03e0,
1471     0xd5033fdf,     0xd50330ff,     0xd503207f,     0xd50320ff,
1472     0xd5033e9f,     0xd50332bf,     0xd61f0200,     0xd63f0280,
1473     0xdac123ea,     0xdac127fb,     0xdac12be8,     0xdac12fe0,
1474     0xdac133e1,     0xdac137f5,     0xdac13bf1,     0xdac13ffd,
1475     0xdac147fd,     0xd61f0b9f,     0xd61f0c3f,     0xd63f0aff,
1476     0xd63f0ebf,     0xd51b4434,     0xd51b4216,     0xd53b443b,
1477     0xd53b4213,     0xd53b00eb,     0xd53b0030,     0xdac143e6,
1478     0xc8117c80,     0xc80afed8,     0xc85f7e6a,     0xc85ffca1,
1479     0xc89ffd1e,     0xc8dffe2c,     0x88097cee,     0x8801fe05,
1480     0x885f7d82,     0x885ffd8a,     0x889fff83,     0x88dfff4e,
1481     0x481e7dca,     0x4815fd2d,     0x485f7f76,     0x485ffe7c,
1482     0x489fffcb,     0x48dffc53,     0x08027c37,     0x0800fe0c,
1483     0x085f7ded,     0x085ffeb1,     0x089ffd6d,     0x08dffd1e,
1484     0xc87f3578,     0xc87feaa1,     0xc83b506d,     0xc82c87a6,
1485     0x887f1166,     0x887f93d0,     0x883e32a4,     0x883bf12f,
1486     0xf80011f9,     0xb81b1022,     0x381ea354,     0x79002fd7,
1487     0xf85cf39a,     0xb8580309,     0x385e218c,     0x784051e1,
1488     0x389e11d8,     0x789fa1f8,     0x79c01865,     0xb881131b,
1489     0xfc5dd3ad,     0xbc5d1137,     0xfc00900b,     0xbc181015,
1490     0xf818ec7d,     0xb81b8c91,     0x381efc40,     0x78007c3d,
1491     0xf857beb0,     0xb8413dd4,     0x385fddd6,     0x78409e2f,
1492     0x389eddea,     0x789e7d94,     0x78de3d55,     0xb8805c13,
1493     0xfc5cadc0,     0xbc428c23,     0xfc1a2dc4,     0xbc1caf92,
1494     0xf81475f6,     0xb81f95d1,     0x381e757e,     0x78014561,
1495     0xf8402436,     0xb85896e2,     0x385f4763,     0x785db4f0,
1496     0x3880374f,     0x789e25e7,     0x78dd0563,     0xb88166f9,
1497     0xfc529540,     0xbc4374d3,     0xfc1166ae,     0xbc1ba6c0,
1498     0xf820ea7b,     0xb82d68c8,     0x38367a04,     0x782f4b59,
1499     0xf878c8a4,     0xb8674a24,     0x386b78f1,     0x78776bc0,
1500     0x38a15aca,     0x78bedbd5,     0x78fcd94b,     0xb8aa4a7c,
1501     0xfc6ecbbe,     0xbc65d8a8,     0xfc2de919,     0xbc3a7b11,
1502     0xf91f1193,     0xb91ed5f7,     0x391ec9bd,     0x79182ceb,
1503     0xf95d4b0a,     0xb9581010,     0x395fc034,     0x795fb221,
1504     0x399d8731,     0x799efb3b,     0x79dd1a2e,     0xb998e4ea,
1505     0xfd583723,     0xbd5ea12c,     0xfd18dc38,     0xbd1b0e83,
1506     0x58ffda82,     0x1800001d,     0xf885d1c0,     0xd8ffda20,
1507     0xf8a77820,     0xf9980220,     0x1a030301,     0x3a140311,
1508     0x5a0d000b,     0x7a07015c,     0x9a1001e4,     0xba140182,
1509     0xda0d01bd,     0xfa0c00ce,     0x0b31f194,     0x2b206d7b,
1510     0xcb29f027,     0x6b210f63,     0x8b2cb34d,     0xab2a88b1,
1511     0xcb2f511e,     0xeb3332f3,     0x3a4533aa,     0x7a4d312b,
1512     0xba442146,     0xfa42818c,     0x3a466a02,     0x7a4b68ed,
1513     0xba4a9b6b,     0xfa4dd86d,     0x1a8a637a,     0x1a9cd6aa,
1514     0x5a9bd137,     0x5a8fd7aa,     0x9a95233e,     0x9a95c620,
1515     0xda9422b0,     0xda8397d3,     0x5ac00173,     0x5ac00418,
1516     0x5ac00b3b,     0x5ac0106e,     0x5ac0162e,     0xdac001e7,
1517     0xdac00798,     0xdac00b31,     0xdac00f42,     0xdac010bc,
1518     0xdac01759,     0xdac1021b,     0xdac104d1,     0xdac10995,
1519     0xdac10c80,     0xdac1136c,     0xdac11791,     0xdac1185c,
1520     0xdac11d51,     0xd71f09ee,     0xd71f0dc3,     0xd73f0b2f,
1521     0xd73f0e6e,     0x1ac40a05,     0x1ac40f3a,     0x1acc2042,
1522     0x1ac8263d,     0x1ac42867,     0x1ada2c99,     0x9ad10899,
1523     0x9ad10f40,     0x9ad521f7,     0x9adb263c,     0x9ac0286a,
1524     0x9ac92f27,     0x9bdd7de6,     0x9b427d4f,     0x1b0b2cf1,
1525     0x1b1ddcf7,     0x9b0b2f6e,     0x9b0cbf04,     0x9b2b728e,
1526     0x9b2cdd6d,     0x9bae275e,     0x9ba7954d,     0x7ec315fe,
1527     0x1ef0098c,     0x1ef21bff,     0x1ef02ab3,     0x1ef5394f,
1528     0x1efc4942,     0x1eff5bc7,     0x1ee28832,     0x7ea3d546,
1529     0x1e270979,     0x1e201981,     0x1e3d2a63,     0x1e263ae6,
1530     0x1e3b4b80,     0x1e2758a2,     0x1e39899d,     0x7ef8d58d,
1531     0x1e720913,     0x1e751b56,     0x1e622a74,     0x1e683ade,
1532     0x1e754a76,     0x1e755a4c,     0x1e638a06,     0x1fc373a3,
1533     0x1f0a35cf,     0x1f0aea4c,     0x1f2f74e7,     0x1f2032e0,
1534     0x1f4d21d8,     0x1f49d0ef,     0x1f7f43b3,     0x1f705522,
1535     0x1e20409e,     0x1e20c361,     0x1e214319,     0x1e21c2ae,
1536     0x1e22c0cd,     0x1e23c32c,     0x1ee243d9,     0x1e6042bc,
1537     0x1e60c2f0,     0x1e6143a5,     0x1e61c276,     0x1e62428d,
1538     0x1ee1c393,     0x1e3800d1,     0x9e3800ed,     0x1e78035c,
1539     0x9e7800d1,     0x1e220081,     0x9e22028e,     0x1e6202a7,
1540     0x9e6202fb,     0x1e24028d,     0x9e64039e,     0x1e3002aa,
1541     0x9e700225,     0x1e2601cb,     0x9e6602ad,     0x1e2701db,
1542     0x9e6702e4,     0x1e3e2300,     0x1e6e2180,     0x1e202228,
1543     0x1e602388,     0x29021b40,     0x297c78c0,     0x69660970,
1544     0xa908018f,     0xa9427ae7,     0x29a03cfa,     0x29fc3d4b,
1545     0x69c84033,     0xa988240e,     0xa9fa0d9b,     0x28a02d88,
1546     0x28c8408a,     0x68f87a6a,     0xa8ba09f8,     0xa8c52a18,
1547     0x280257be,     0x28727948,     0xa83868de,     0xa8440a98,
1548     0x0c40733f,     0x4cdfa1e5,     0x0ccd6cea,     0x4cdf260d,
1549     0x0d40c227,     0x4ddfcb30,     0x0dc7cc6b,     0x4c408ced,
1550     0x0cdf8769,     0x4d60c346,     0x0dffca17,     0x4de8cda6,
1551     0x4cda4834,     0x0c4049ef,     0x4d40e6dd,     0x4ddfe946,
1552     0x0dcfeccf,     0x4cdf0546,     0x0cc7006b,     0x0d60e32c,
1553     0x0dffe5eb,     0x0dfce8de,     0x0e31bb9b,     0x4e31bbbc,
1554     0x0e71b841,     0x4e71bbbc,     0x4eb1b841,     0x0e30aab4,
1555     0x4e30abdd,     0x0e70aa30,     0x4e70a9cd,     0x4eb0a96a,
1556     0x6e30fbdd,     0x0e31abdd,     0x2e31aa93,     0x4e31aaf6,
1557     0x6e31a96a,     0x0e71a8a4,     0x2e71a81f,     0x4e71aad5,
1558     0x6e71a928,     0x4eb1a81f,     0x6eb1aa93,     0x6eb0f96a,
1559     0x7e30fbbc,     0x7e70f862,     0x7eb0fb59,     0x7ef0f8c5,
1560     0x0ea0c883,     0x4ea0c928,     0x4ee0caf6,     0x2ea0ca93,
1561     0x6ea0c9cd,     0x6ee0c8c5,     0x0ea0dbdd,     0x4ea0db38,
1562     0x4ee0dad5,     0x0ea0eb7a,     0x4ea0eb38,     0x4ee0e883,
1563     0x2ea0db38,     0x6ea0db7a,     0x6ee0db17,     0x0e20ba0f,
1564     0x4e20bad5,     0x0e60b883,     0x4e60bb38,     0x0ea0b928,
1565     0x4ea0bb59,     0x4ee0bab4,     0x0ea0fa30,     0x4ea0fa51,
1566     0x4ee0f862,     0x0ef8f841,     0x4ef8f820,     0x2ea0fb38,
1567     0x6ea0f8a4,     0x6ee0f883,     0x2ef8f9ac,     0x6ef8f81f,
1568     0x2ea1fbbc,     0x6ea1f96a,     0x6ee1fb7a,     0x2ef9f862,
1569     0x6ef9f9ac,     0x2e205a72,     0x6e20581f,     0x0e231c41,
1570     0x4e2f1dcd,     0x0ebf1fdd,     0x4ea21c20,     0x2e351e93,
1571     0x6e2e1dac,     0x0e338651,     0x4e3886f6,     0x0e6f85cd,
1572     0x4e7e87bc,     0x0ea087fe,     0x4ea1841f,     0x4ee38441,
1573     0x0e3c0f7a,     0x4e3e0fbc,     0x0e660ca4,     0x4e600ffe,
1574     0x0ea60ca4,     0x4ea80ce6,     0x4ee00ffe,     0x2e3c0f7a,
1575     0x6e340e72,     0x2e6b0d49,     0x6e6a0d28,     0x2eae0dac,
1576     0x6ea20c20,     0x6ef60eb4,     0x0e23d441,     0x4e3ad738,
1577     0x4e64d462,     0x0e421420,     0x4e4b1549,     0x2e3a8738,
1578     0x6e3c877a,     0x2e728630,     0x6e6087fe,     0x2ea58483,
1579     0x6eac856a,     0x6ef98717,     0x0e2c2d6a,     0x4e262ca4,
1580     0x0e742e72,     0x4e642c62,     0x0ead2d8b,     0x4eaa2d28,
1581     0x4eec2d6a,     0x2e312e0f,     0x6e332e51,     0x2e642c62,
1582     0x6e6c2d6a,     0x2eae2dac,     0x6eae2dac,     0x6ef12e0f,
1583     0x0eafd5cd,     0x4ea4d462,     0x4ee9d507,     0x0ed616b4,
1584     0x4edc177a,     0x0e329e30,     0x4e269ca4,     0x0e649c62,
1585     0x4e669ca4,     0x0eae9dac,     0x4eb49e72,     0x2eb7d6d5,
1586     0x6eb2d630,     0x6ef4d672,     0x2ecd158b,     0x6ed716d5,
1587     0x2e39d717,     0x6e2ed5ac,     0x6e7cd77a,     0x2e591717,
1588     0x6e5e17bc,     0x2e30ddee,     0x6e2ddd8b,     0x6e7adf38,
1589     0x2e431c41,     0x6e4e1dac,     0x0e61941f,     0x4e6c956a,
1590     0x0eb29630,     0x4ea99507,     0x0e24cc62,     0x4e25cc83,
1591     0x4e6fcdcd,     0x0e550e93,     0x4e530e51,     0x2e729630,
1592     0x6e659483,     0x2ea39441,     0x6ead958b,     0x0ea0cffe,
1593     0x4ea7ccc5,     0x4eeacd28,     0x0ed10e0f,     0x4edf0fdd,
1594     0x2e20fffe,     0x6e22fc20,     0x6e76feb4,     0x2e493d07,
1595     0x6e563eb4,     0x0e396717,     0x4e3e67bc,     0x0e7766d5,
1596     0x4e7d679b,     0x0ebb6759,     0x4ea764c5,     0x2e236441,
1597     0x6e396717,     0x2e726630,     0x6e61641f,     0x2ea764c5,
1598     0x6eae65ac,     0x0e2ba549,     0x4e3ea7bc,     0x0e71a60f,
1599     0x4e7fa7dd,     0x0eb8a6f6,     0x4ea1a41f,     0x0e35f693,
1600     0x4e21f41f,     0x4e67f4c5,     0x0e5035ee,     0x4e543672,
1601     0x0e216c1f,     0x4e346e72,     0x0e7d6f9b,     0x4e766eb4,
1602     0x0eb26e30,     0x4eae6dac,     0x2e2d6d8b,     0x6e2b6d49,
1603     0x2e686ce6,     0x6e606ffe,     0x2eb36e51,     0x6ebd6f9b,
1604     0x0e3eafbc,     0x4e20affe,     0x0e69ad07,     0x4e6cad6a,
1605     0x0eb6aeb4,     0x4eacad6a,     0x0e66b4a4,     0x4e7ab738,
1606     0x0eb3b651,     0x4eb3b651,     0x0e3826f6,     0x4e252483,
1607     0x0e7f27dd,     0x4e71260f,     0x0eb826f6,     0x4eb52693,
1608     0x0eb5f693,     0x4eb8f6f6,     0x4ee4f462,     0x0ed1360f,
1609     0x4ec834e6,     0x2eaeedac,     0x6eb2ee30,     0x6eeded8b,
1610     0x2ecf2dcd,     0x6ed92f17,     0x0f81100f,     0x4f848862,
1611     0x4fc31841,     0x0fad518b,     0x4fa780c5,     0x4fd059ee,
1612     0x2fa890e6,     0x4fa38841,     0x6fc1900f,     0x0f7b8149,
1613     0x4f4688a4,     0x0faf81cd,     0x4fa58083,     0x0e3736d5,
1614     0x4e393717,     0x0e61341f,     0x4e7b3759,     0x0ea43462,
1615     0x4ea1341f,     0x4efd379b,     0x0e343e72,     0x4e2c3d6a,
1616     0x0e793f17,     0x4e753e93,     0x0ea53c83,     0x4eb43e72,
1617     0x4ee23c20,     0x2e3b8f59,     0x6e3c8f7a,     0x2e798f17,
1618     0x6e648c62,     0x2eb48e72,     0x6eae8dac,     0x6ee68ca4,
1619     0x2e3e37bc,     0x6e2037fe,     0x2e7f37dd,     0x6e723630,
1620     0x2ebd379b,     0x6ea834e6,     0x6eeb3549,     0x2e3f3fdd,
1621     0x6e343e72,     0x2e693d07,     0x6e663ca4,     0x2ea93d07,
1622     0x6eb13e0f,     0x6eeb3d49,     0x0e39e717,     0x4e2ae528,
1623     0x4e64e462,     0x2ebee7bc,     0x6eb7e6d5,     0x6ee1e41f,
1624     0x2e27e4c5,     0x6e3de79b,     0x6e62e420,     0x659239e8,
1625     0x65d03b94,     0x65d0232d,     0x65d120c2,     0x659129f2,
1626     0x65933ca3,     0x25969683,     0x25961d15,     0x254d1c48,
1627     0x259e3f61,     0x25953b96,     0x255b91d1,     0x247686ed,
1628     0x24309098,     0x2462edb9,     0x24a57468,     0xba5fd3e3,
1629     0x3a5f03e5,     0xfa411be4,     0x7a42cbe2,     0x93df03ff,
1630     0xc820ffff,     0x8822fc7f,     0xc8247cbf,     0x88267fff,
1631     0x4e010fe0,     0x5e040420,     0x4e081fe1,     0x4e0c1fe1,
1632     0x4e0a1fe1,     0x4e071fe1,     0x4e042c20,     0x4e062c20,
1633     0x4e052c20,     0x4e083c20,     0x0e0c3c20,     0x0e0a3c20,
1634     0x0e073c20,     0x9eae0020,     0x0f03f409,     0x6f03f40e,
1635     0x4cc0ac3f,     0x0ea1b820,     0x0ef9b820,     0x4ef9b820,
1636     0x4e21c862,     0x0e79c862,     0x4e79c862,     0x4e61b8a4,
1637     0x0e79b8a4,     0x4e79b8a4,     0x05a08020,     0x05104fe0,
1638     0x05505001,     0x05906fe2,     0x05d03005,     0x05101fea,
1639     0x05901feb,     0x04b0e3e0,     0x0470e7e1,     0x042f9c20,
1640     0x043f9c35,     0x047f9c20,     0x04ff9c20,     0x04299420,
1641     0x04319160,     0x0461943e,     0x04a19020,     0x04038100,
1642     0x040381a0,     0x040387e1,     0x04438be2,     0x04c38fe3,
1643     0x040181e0,     0x04018100,     0x04018621,     0x04418b22,
1644     0x04418822,     0x04818c23,     0x040081e0,     0x04008120,
1645     0x04008761,     0x04008621,     0x04408822,     0x04808c23,
1646     0x042053ff,     0x047f5401,     0x25208028,     0x2538cfe0,
1647     0x2578d001,     0x25b8efe2,     0x25f8f007,     0x2538dfea,
1648     0x25b8dfeb,     0xa400a3e0,     0xa420a7e0,     0xa4484be0,
1649     0xa467afe0,     0xa4a8a7ea,     0xa547a814,     0xa4084ffe,
1650     0xa55c53e0,     0xa5e1540b,     0xe400fbf6,     0xe408ffff,
1651     0xe420e7e0,     0xe4484be0,     0xe460efe0,     0xe547e400,
1652     0xe4014be0,     0xe4a84fe0,     0xe5f15000,     0x858043e0,
1653     0x85a043ff,     0xe59f5d08,     0x0420e3e9,     0x0460e3ea,
1654     0x04a0e3eb,     0x04e0e3ec,     0x25104042,     0x25104871,
1655     0x25904861,     0x25904c92,     0x05344020,     0x05744041,
1656     0x05b44062,     0x05f44083,     0x252c8840,     0x253c1420,
1657     0x25681572,     0x25a21ce3,     0x25ea1e34,     0x253c0421,
1658     0x25680572,     0x25a20ce3,     0x25ea0e34,     0x0522c020,
1659     0x05e6c0a4,     0x2401a001,     0x2443a051,     0x24858881,
1660     0x24c78cd1,     0x24850891,     0x24c70cc1,     0x250f9001,
1661     0x25508051,     0x25802491,     0x25df28c1,     0x25850c81,
1662     0x251e10d1,     0x65816001,     0x65c36051,     0x65854891,
1663     0x65c74cc1,     0x05733820,     0x05b238a4,     0x05f138e6,
1664     0x0570396a,     0x65d0a001,     0x65d6a443,     0x65d4a826,
1665     0x6594ac26,     0x6554ac26,     0x6556ac26,     0x6552ac26,
1666     0x65cbac85,     0x65caac01,     0x6589ac85,     0x6588ac01,
1667     0x65c9ac85,     0x65c8ac01,     0x65dea833,     0x659ca509,
1668     0x65d8a801,     0x65dcac01,     0x655cb241,     0x0520a1e0,
1669     0x0521a601,     0x052281e0,     0x05238601,     0x04a14026,
1670     0x042244a6,     0x046344a6,     0x04a444a6,     0x04e544a7,
1671     0x0568aca7,     0x05b23230,     0x853040af,     0xc5b040af,
1672     0xe57080af,     0xe5b080af,     0x25034440,     0x254054c4,
1673     0x25034640,     0x25415a05,     0x25834440,     0x25c54489,
1674     0x250b5d3a,     0x2550dc20,     0x2518e3e1,     0x2518e021,
1675     0x2518e0a1,     0x2518e121,     0x2518e1a1,     0x2558e3e2,
1676     0x2558e042,     0x2558e0c2,     0x2558e142,     0x2598e3e3,
1677     0x2598e063,     0x2598e0e3,     0x2598e163,     0x25d8e3e4,
1678     0x25d8e084,     0x25d8e104,     0x25d8e184,     0x2518e407,
1679     0x05214800,     0x05614800,     0x05a14800,     0x05e14800,
1680     0x05214c00,     0x05614c00,     0x05a14c00,     0x05e14c00,
1681     0x05304001,     0x05314001,     0x05a18610,     0x05e18610,
1682     0x05271e11,     0x6545e891,     0x6585e891,     0x65c5e891,
1683     0x6545c891,     0x6585c891,     0x65c5c891,     0x45b0c210,
1684     0x45f1c231,     0x1e601000,     0x1e603000,     0x1e621000,
1685     0x1e623000,     0x1e641000,     0x1e643000,     0x1e661000,
1686     0x1e663000,     0x1e681000,     0x1e683000,     0x1e6a1000,
1687     0x1e6a3000,     0x1e6c1000,     0x1e6c3000,     0x1e6e1000,
1688     0x1e6e3000,     0x1e701000,     0x1e703000,     0x1e721000,
1689     0x1e723000,     0x1e741000,     0x1e743000,     0x1e761000,
1690     0x1e763000,     0x1e781000,     0x1e783000,     0x1e7a1000,
1691     0x1e7a3000,     0x1e7c1000,     0x1e7c3000,     0x1e7e1000,
1692     0x1e7e3000,     0xf8268267,     0xf82d023c,     0xf8301046,
1693     0xf83d2083,     0xf8263290,     0xf82d528c,     0xf8284299,
1694     0xf8337160,     0xf8386286,     0xf8bf820e,     0xf8a600e0,
1695     0xf8af1353,     0xf8a922ea,     0xf8b53396,     0xf8a251e3,
1696     0xf8b340f4,     0xf8a470fd,     0xf8a06209,     0xf8f48097,
1697     0xf8f002ea,     0xf8eb10d9,     0xf8ff21b0,     0xf8f7302c,
1698     0xf8ee52a9,     0xf8f041fa,     0xf8e471e4,     0xf8e863c6,
1699     0xf864823d,     0xf87d013a,     0xf86f1162,     0xf87d20e3,
1700     0xf86132bb,     0xf870510e,     0xf8704336,     0xf86572b4,
1701     0xf8706217,     0xb83e8294,     0xb8200264,     0xb8381284,
1702     0xb8242358,     0xb8333102,     0xb828530e,     0xb83042df,
1703     0xb824703f,     0xb82a6194,     0xb8a080e9,     0xb8b80090,
1704     0xb8bb1146,     0xb8bb21b8,     0xb8b032df,     0xb8b653f4,
1705     0xb8bd41c9,     0xb8b47287,     0xb8bc6169,     0xb8ee828c,
1706     0xb8e10138,     0xb8f3126d,     0xb8f020b0,     0xb8e03183,
1707     0xb8e851ef,     0xb8f041e4,     0xb8fe7005,     0xb8ea6376,
1708     0xb8638120,     0xb873015d,     0xb8781284,     0xb86723b8,
1709     0xb86e3175,     0xb87b51ed,     0xb87f41d1,     0xb863721e,
1710     0xb87660f4,     0xce216874,     0xce104533,     0xce648c15,
1711     0xce8e3302,     0xce6e82ab,     0xce6c87d1,     0xcec08063,
1712     0xce638937,     0x25e0c358,     0x25a1c7d3,     0x0580785a,
1713     0x05426328,     0x05009892,     0x25a0cc29,     0x2561cec8,
1714     0x058044b3,     0x05401c99,     0x05006b49,     0x25e0d6f7,
1715     0x2561c528,     0x0583c8bc,     0x0542522f,     0x05001ec0,
1716     0x25e0de65,     0x25a1c113,     0x05803cad,     0x0540f3c0,
1717     0x0500ab15,     0x2560c28c,     0x2561d7c0,     0x05801ed7,
1718     0x0542633b,     0x05003696,     0x2560d4b4,     0x25e1c918,
1719     0x058021ff,     0x05400e15,     0x0500f3de,     0x0473025a,
1720     0x04bd05ab,     0x658e0025,     0x658a08e2,     0x659a0493,
1721     0x043e1062,     0x04f418b4,     0x046d15bd,     0x04611fce,
1722     0x04d6a07c,     0x04001929,     0x041a09da,     0x04d098f4,
1723     0x04db10d4,     0x0459a3ad,     0x041aa029,     0x041919fb,
1724     0x04d39e24,     0x04118302,     0x04101dba,     0x04d7ae16,
1725     0x04dea571,     0x04180210,     0x05e786fc,     0x05e4915c,
1726     0x04881cf1,     0x044a0f04,     0x04090969,     0x048b16c4,
1727     0x044101e4,     0x04dcbf44,     0x65809745,     0x658d833f,
1728     0x65c68468,     0x65c79b07,     0x65829e38,     0x049dafca,
1729     0x6582bba8,     0x65c0b7ff,     0x65c1b4e0,     0x658dbadd,
1730     0x65819a9d,     0x65ed9246,     0x65b30815,     0x65e6263c,
1731     0x65eebb94,     0x65bad14e,     0x65efe178,     0x65fc5697,
1732     0x65e07f14,     0x040c55a6,     0x04977f4d,     0x043d3046,
1733     0x04b733a0,     0x046830a4,     0x04ed322d,     0x05686948,
1734     0x05bd6c13,     0x65c88ef0,     0x450db3d7,     0x4540b6d9,
1735     0x043e3979,     0x445896ce,     0x445a9005,     0x44d98069,
1736     0x445b87ae,     0x04da348e,     0x04982edb,     0x0499397f,
1737     0x0408338c,     0x04ca309c,     0x65c721e6,     0x65c63641,
1738     0x65982882,     0x04812b8b,     0x0e251083,     0x4e3712d5,
1739     0x0e61101f,     0x4e6d118b,     0x0eba1338,     0x4eb712d5,
1740     0x2e31120f,     0x6e2e11ac,     0x2e6810e6,     0x6e6f11cd,
1741     0x2eaa1128,     0x6eb1120f,
1742   };
1743 // END  Generated code -- do not edit