qemu/target/arm/neon_helper.c
/*
 * ARM NEON vector operations.
 *
 * Copyright (c) 2007, 2008 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licensed under the GNU GPL v2.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

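/* Set the cumulative saturation flag: env->vfp.qc[0] backs the FPSCR.QC
 * bit, which lets the guest detect that a saturating operation clamped
 * its result.
 */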
#define SET_QC() env->vfp.qc[0] = 1

#define NEON_TYPE1(name, type) \
typedef struct \
{ \
    type v1; \
} neon_##name;
#ifdef HOST_WORDS_BIGENDIAN
#define NEON_TYPE2(name, type) \
typedef struct \
{ \
    type v2; \
    type v1; \
} neon_##name;
#define NEON_TYPE4(name, type) \
typedef struct \
{ \
    type v4; \
    type v3; \
    type v2; \
    type v1; \
} neon_##name;
#else
#define NEON_TYPE2(name, type) \
typedef struct \
{ \
    type v1; \
    type v2; \
} neon_##name;
#define NEON_TYPE4(name, type) \
typedef struct \
{ \
    type v1; \
    type v2; \
    type v3; \
    type v4; \
} neon_##name;
#endif

NEON_TYPE4(s8, int8_t)
NEON_TYPE4(u8, uint8_t)
NEON_TYPE2(s16, int16_t)
NEON_TYPE2(u16, uint16_t)
NEON_TYPE1(s32, int32_t)
NEON_TYPE1(u32, uint32_t)
#undef NEON_TYPE4
#undef NEON_TYPE2
#undef NEON_TYPE1

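/* A 32-bit helper argument packs four 8-bit lanes, two 16-bit lanes or a
 * single 32-bit lane; the unions below reinterpret it in host memory
 * order, so lane access needs no explicit shifting or masking.  E.g. on
 * a little-endian host 0x04030201 unpacks to neon_u8
 * {v1=0x01, v2=0x02, v3=0x03, v4=0x04}.
 */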
/* Copy from a uint32_t to a vector structure type.  */
#define NEON_UNPACK(vtype, dest, val) do { \
    union { \
        vtype v; \
        uint32_t i; \
    } conv_u; \
    conv_u.i = (val); \
    dest = conv_u.v; \
    } while(0)

/* Copy from a vector structure type to a uint32_t.  */
#define NEON_PACK(vtype, dest, val) do { \
    union { \
        vtype v; \
        uint32_t i; \
    } conv_u; \
    conv_u.v = (val); \
    dest = conv_u.i; \
    } while(0)

#define NEON_DO1 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1);
#define NEON_DO2 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
    NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2);
#define NEON_DO4 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
    NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2); \
    NEON_FN(vdest.v3, vsrc1.v3, vsrc2.v3); \
    NEON_FN(vdest.v4, vsrc1.v4, vsrc2.v4);

#define NEON_VOP_BODY(vtype, n) \
{ \
    uint32_t res; \
    vtype vsrc1; \
    vtype vsrc2; \
    vtype vdest; \
    NEON_UNPACK(vtype, vsrc1, arg1); \
    NEON_UNPACK(vtype, vsrc2, arg2); \
    NEON_DO##n; \
    NEON_PACK(vtype, res, vdest); \
    return res; \
}

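/* NEON_VOP/NEON_VOP_ENV expand to a complete helper that unpacks both
 * operands, applies the current definition of NEON_FN to each lane and
 * repacks the result; NEON_FN is redefined before each instantiation.
 */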
#define NEON_VOP(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
NEON_VOP_BODY(vtype, n)

#define NEON_VOP_ENV(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(CPUARMState *env, uint32_t arg1, uint32_t arg2) \
NEON_VOP_BODY(vtype, n)

/* Pairwise operations.  */
/* For 32-bit elements each segment only contains a single element, so
   the elementwise and pairwise operations are the same.  */
#define NEON_PDO2 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
    NEON_FN(vdest.v2, vsrc2.v1, vsrc2.v2);
#define NEON_PDO4 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
    NEON_FN(vdest.v2, vsrc1.v3, vsrc1.v4); \
    NEON_FN(vdest.v3, vsrc2.v1, vsrc2.v2); \
    NEON_FN(vdest.v4, vsrc2.v3, vsrc2.v4);

#define NEON_POP(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
{ \
    uint32_t res; \
    vtype vsrc1; \
    vtype vsrc2; \
    vtype vdest; \
    NEON_UNPACK(vtype, vsrc1, arg1); \
    NEON_UNPACK(vtype, vsrc2, arg2); \
    NEON_PDO##n; \
    NEON_PACK(vtype, res, vdest); \
    return res; \
}

/* Unary operators.  */
#define NEON_VOP1(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg) \
{ \
    vtype vsrc1; \
    vtype vdest; \
    NEON_UNPACK(vtype, vsrc1, arg); \
    NEON_DO##n; \
    NEON_PACK(vtype, arg, vdest); \
    return arg; \
}


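/* Unsigned saturating addition on 8/16-bit lanes: compute in 32 bits and
 * saturate (setting QC) when truncation to the lane type changes the
 * value.  E.g. for qadd_u8, 0xff + 0x01 gives tmp = 0x100, and since
 * (uint8_t)tmp != tmp the lane saturates to 0xff.
 */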
#define NEON_USAT(dest, src1, src2, type) do { \
    uint32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        dest = ~0; \
    } else { \
        dest = tmp; \
    }} while(0)
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
NEON_VOP_ENV(qadd_u8, neon_u8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
NEON_VOP_ENV(qadd_u16, neon_u16, 2)
#undef NEON_FN
#undef NEON_USAT

uint32_t HELPER(neon_qadd_u32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        SET_QC();
        res = ~0;
    }
    return res;
}

uint64_t HELPER(neon_qadd_u64)(CPUARMState *env, uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 + src2;
    if (res < src1) {
        SET_QC();
        res = ~(uint64_t)0;
    }
    return res;
}

#define NEON_SSAT(dest, src1, src2, type) do { \
    int32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        if (src2 > 0) { \
            tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
        } else { \
            tmp = 1 << (sizeof(type) * 8 - 1); \
        } \
    } \
    dest = tmp; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
NEON_VOP_ENV(qadd_s8, neon_s8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
NEON_VOP_ENV(qadd_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_SSAT

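/* For 32/64-bit lanes signed overflow is detected from the sign bits
 * alone: it occurred iff the operands agree in sign but the result does
 * not; the result then saturates toward that shared sign (minimum for
 * two negatives, maximum for two positives).
 */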
uint32_t HELPER(neon_qadd_s32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        SET_QC();
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint64_t HELPER(neon_qadd_s64)(CPUARMState *env, uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 + src2;
    if (((res ^ src1) & SIGNBIT64) && !((src1 ^ src2) & SIGNBIT64)) {
        SET_QC();
        res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
    }
    return res;
}

/* Unsigned saturating accumulate of signed value
 *
 * Op1/Rn is treated as signed
 * Op2/Rd is treated as unsigned
 *
 * Explicit casting is used to ensure the correct sign extension of
 * inputs. The result is treated as an unsigned value and saturated as such.
 *
 * We use a macro for the 8/16 bit cases which expects signed integers of va,
 * vb, and vr for interim calculation and an unsigned 32 bit result value r.
 */

#define USATACC(bits, shift) \
    do { \
        va = sextract32(a, shift, bits);                                \
        vb = extract32(b, shift, bits);                                 \
        vr = va + vb;                                                   \
        if (vr > UINT##bits##_MAX) {                                    \
            SET_QC();                                                   \
            vr = UINT##bits##_MAX;                                      \
        } else if (vr < 0) {                                            \
            SET_QC();                                                   \
            vr = 0;                                                     \
        }                                                               \
        r = deposit32(r, shift, bits, vr);                              \
    } while (0)

uint32_t HELPER(neon_uqadd_s8)(CPUARMState *env, uint32_t a, uint32_t b)
{
    int16_t va, vb, vr;
    uint32_t r = 0;

    USATACC(8, 0);
    USATACC(8, 8);
    USATACC(8, 16);
    USATACC(8, 24);
    return r;
}

uint32_t HELPER(neon_uqadd_s16)(CPUARMState *env, uint32_t a, uint32_t b)
{
    int32_t va, vb, vr;
    uint64_t r = 0;

    USATACC(16, 0);
    USATACC(16, 16);
    return r;
}

#undef USATACC

uint32_t HELPER(neon_uqadd_s32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    int64_t va = (int32_t)a;
    int64_t vb = (uint32_t)b;
    int64_t vr = va + vb;
    if (vr > UINT32_MAX) {
        SET_QC();
        vr = UINT32_MAX;
    } else if (vr < 0) {
        SET_QC();
        vr = 0;
    }
    return vr;
}

uint64_t HELPER(neon_uqadd_s64)(CPUARMState *env, uint64_t a, uint64_t b)
{
    uint64_t res;
    res = a + b;
    /* We only need to look at the pattern of SIGN bits to detect
     * +ve/-ve saturation
     */
    if (~a & b & ~res & SIGNBIT64) {
        SET_QC();
        res = UINT64_MAX;
    } else if (a & ~b & res & SIGNBIT64) {
        SET_QC();
        res = 0;
    }
    return res;
}

/* Signed saturating accumulate of unsigned value
 *
 * Op1/Rn is treated as unsigned
 * Op2/Rd is treated as signed
 *
 * The result is treated as a signed value and saturated as such
 *
 * We use a macro for the 8/16 bit cases which expects signed integers of va,
 * vb, and vr for interim calculation and an unsigned 32 bit result value r.
 */

#define SSATACC(bits, shift) \
    do { \
        va = extract32(a, shift, bits);                                 \
        vb = sextract32(b, shift, bits);                                \
        vr = va + vb;                                                   \
        if (vr > INT##bits##_MAX) {                                     \
            SET_QC();                                                   \
            vr = INT##bits##_MAX;                                       \
        } else if (vr < INT##bits##_MIN) {                              \
            SET_QC();                                                   \
            vr = INT##bits##_MIN;                                       \
        }                                                               \
        r = deposit32(r, shift, bits, vr);                              \
    } while (0)

uint32_t HELPER(neon_sqadd_u8)(CPUARMState *env, uint32_t a, uint32_t b)
{
    int16_t va, vb, vr;
    uint32_t r = 0;

    SSATACC(8, 0);
    SSATACC(8, 8);
    SSATACC(8, 16);
    SSATACC(8, 24);
    return r;
}

uint32_t HELPER(neon_sqadd_u16)(CPUARMState *env, uint32_t a, uint32_t b)
{
    int32_t va, vb, vr;
    uint32_t r = 0;

    SSATACC(16, 0);
    SSATACC(16, 16);

    return r;
}

#undef SSATACC

uint32_t HELPER(neon_sqadd_u32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    int64_t res;
    int64_t op1 = (uint32_t)a;
    int64_t op2 = (int32_t)b;
    res = op1 + op2;
    if (res > INT32_MAX) {
        SET_QC();
        res = INT32_MAX;
    } else if (res < INT32_MIN) {
        SET_QC();
        res = INT32_MIN;
    }
    return res;
}

uint64_t HELPER(neon_sqadd_u64)(CPUARMState *env, uint64_t a, uint64_t b)
{
    uint64_t res;
    res = a + b;
    /* We only need to look at the pattern of SIGN bits to detect an overflow */
    if (((a & res)
         | (~b & res)
         | (a & ~b)) & SIGNBIT64) {
        SET_QC();
        res = INT64_MAX;
    }
    return res;
}


#define NEON_USAT(dest, src1, src2, type) do { \
    uint32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        dest = 0; \
    } else { \
        dest = tmp; \
    }} while(0)
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
NEON_VOP_ENV(qsub_u8, neon_u8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
NEON_VOP_ENV(qsub_u16, neon_u16, 2)
#undef NEON_FN
#undef NEON_USAT

uint32_t HELPER(neon_qsub_u32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        SET_QC();
        res = 0;
    }
    return res;
}

uint64_t HELPER(neon_qsub_u64)(CPUARMState *env, uint64_t src1, uint64_t src2)
{
    uint64_t res;

    if (src1 < src2) {
        SET_QC();
        res = 0;
    } else {
        res = src1 - src2;
    }
    return res;
}

#define NEON_SSAT(dest, src1, src2, type) do { \
    int32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        if (src2 < 0) { \
            tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
        } else { \
            tmp = 1 << (sizeof(type) * 8 - 1); \
        } \
    } \
    dest = tmp; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
NEON_VOP_ENV(qsub_s8, neon_s8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
NEON_VOP_ENV(qsub_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_SSAT

uint32_t HELPER(neon_qsub_s32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        SET_QC();
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint64_t HELPER(neon_qsub_s64)(CPUARMState *env, uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 - src2;
    if (((res ^ src1) & SIGNBIT64) && ((src1 ^ src2) & SIGNBIT64)) {
        SET_QC();
        res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
    }
    return res;
}

#define NEON_FN(dest, src1, src2) dest = (src1 + src2) >> 1
NEON_VOP(hadd_s8, neon_s8, 4)
NEON_VOP(hadd_u8, neon_u8, 4)
NEON_VOP(hadd_s16, neon_s16, 2)
NEON_VOP(hadd_u16, neon_u16, 2)
#undef NEON_FN

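/* Halving add of 32-bit lanes: pre-shifting both operands avoids
 * overflow; the two discarded low bits carry into the result only when
 * both are set (or when either is set, for the rounding variant below).
 */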
int32_t HELPER(neon_hadd_s32)(int32_t src1, int32_t src2)
{
    int32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if (src1 & src2 & 1)
        dest++;
    return dest;
}

uint32_t HELPER(neon_hadd_u32)(uint32_t src1, uint32_t src2)
{
    uint32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if (src1 & src2 & 1)
        dest++;
    return dest;
}

#define NEON_FN(dest, src1, src2) dest = (src1 + src2 + 1) >> 1
NEON_VOP(rhadd_s8, neon_s8, 4)
NEON_VOP(rhadd_u8, neon_u8, 4)
NEON_VOP(rhadd_s16, neon_s16, 2)
NEON_VOP(rhadd_u16, neon_u16, 2)
#undef NEON_FN

int32_t HELPER(neon_rhadd_s32)(int32_t src1, int32_t src2)
{
    int32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if ((src1 | src2) & 1)
        dest++;
    return dest;
}

uint32_t HELPER(neon_rhadd_u32)(uint32_t src1, uint32_t src2)
{
    uint32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if ((src1 | src2) & 1)
        dest++;
    return dest;
}

#define NEON_FN(dest, src1, src2) dest = (src1 - src2) >> 1
NEON_VOP(hsub_s8, neon_s8, 4)
NEON_VOP(hsub_u8, neon_u8, 4)
NEON_VOP(hsub_s16, neon_s16, 2)
NEON_VOP(hsub_u16, neon_u16, 2)
#undef NEON_FN

int32_t HELPER(neon_hsub_s32)(int32_t src1, int32_t src2)
{
    int32_t dest;

    dest = (src1 >> 1) - (src2 >> 1);
    if ((~src1) & src2 & 1)
        dest--;
    return dest;
}

uint32_t HELPER(neon_hsub_u32)(uint32_t src1, uint32_t src2)
{
    uint32_t dest;

    dest = (src1 >> 1) - (src2 >> 1);
    if ((~src1) & src2 & 1)
        dest--;
    return dest;
}

#define NEON_FN(dest, src1, src2) dest = (src1 < src2) ? src1 : src2
NEON_POP(pmin_s8, neon_s8, 4)
NEON_POP(pmin_u8, neon_u8, 4)
NEON_POP(pmin_s16, neon_s16, 2)
NEON_POP(pmin_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? src1 : src2
NEON_POP(pmax_s8, neon_s8, 4)
NEON_POP(pmax_u8, neon_u8, 4)
NEON_POP(pmax_s16, neon_s16, 2)
NEON_POP(pmax_u16, neon_u16, 2)
#undef NEON_FN

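/* Variable shifts take a signed per-lane count from the bottom byte of
 * the second operand: negative counts shift right, and counts wider than
 * the lane give 0 (or the sign bits, for arithmetic right shifts).
 */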
#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8 || \
        tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(shl_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> (sizeof(src1) * 8 - 1); \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(shl_s16, neon_s16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if ((tmp >= (ssize_t)sizeof(src1) * 8) \
        || (tmp <= -(ssize_t)sizeof(src1) * 8)) { \
        dest = 0; \
    } else if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(rshl_s8, neon_s8, 4)
NEON_VOP(rshl_s16, neon_s16, 2)
#undef NEON_FN

/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64 bit accumulator.  */
uint32_t HELPER(neon_rshl_s32)(uint32_t valop, uint32_t shiftop)
{
    int32_t dest;
    int32_t val = (int32_t)valop;
    int8_t shift = (int8_t)shiftop;
    if ((shift >= 32) || (shift <= -32)) {
        dest = 0;
    } else if (shift < 0) {
        int64_t big_dest = ((int64_t)val + (1 << (-1 - shift)));
        dest = big_dest >> -shift;
    } else {
        dest = val << shift;
    }
    return dest;
}

/* Handling addition overflow with 64 bit input values is more
 * tricky than with 32 bit values.  */
uint64_t HELPER(neon_rshl_s64)(uint64_t valop, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    int64_t val = valop;
    if ((shift >= 64) || (shift <= -64)) {
        val = 0;
    } else if (shift < 0) {
        val >>= (-shift - 1);
        if (val == INT64_MAX) {
            /* In this case, it means that the rounding constant is 1,
             * and the addition would overflow. Return the actual
             * result directly.  */
            val = 0x4000000000000000LL;
        } else {
            val++;
            val >>= 1;
        }
    } else {
        val <<= shift;
    }
    return val;
}

#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8 || \
        tmp < -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> (-tmp - 1); \
    } else if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(rshl_u8, neon_u8, 4)
NEON_VOP(rshl_u16, neon_u16, 2)
#undef NEON_FN

/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64 bit accumulator.  */
uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shiftop)
{
    uint32_t dest;
    int8_t shift = (int8_t)shiftop;
    if (shift >= 32 || shift < -32) {
        dest = 0;
    } else if (shift == -32) {
        dest = val >> 31;
    } else if (shift < 0) {
        uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift)));
        dest = big_dest >> -shift;
    } else {
        dest = val << shift;
    }
    return dest;
}

/* Handling addition overflow with 64 bit input values is more
 * tricky than with 32 bit values.  */
uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shiftop)
{
    int8_t shift = (uint8_t)shiftop;
    if (shift >= 64 || shift < -64) {
        val = 0;
    } else if (shift == -64) {
        /* Rounding a 1-bit result just preserves that bit.  */
        val >>= 63;
    } else if (shift < 0) {
        val >>= (-shift - 1);
        if (val == UINT64_MAX) {
            /* In this case, it means that the rounding constant is 1,
             * and the addition would overflow. Return the actual
             * result directly.  */
            val = 0x8000000000000000ULL;
        } else {
            val++;
            val >>= 1;
        }
    } else {
        val <<= shift;
    }
    return val;
}

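/* Saturating shifts: a left shift that discards set bits saturates and
 * sets QC; the check shifts the result back and compares with the
 * source.  Right shifts can never saturate.
 */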
#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        if (src1) { \
            SET_QC(); \
            dest = ~0; \
        } else { \
            dest = 0; \
        } \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            SET_QC(); \
            dest = ~0; \
        } \
    }} while (0)
NEON_VOP_ENV(qshl_u8, neon_u8, 4)
NEON_VOP_ENV(qshl_u16, neon_u16, 2)
NEON_VOP_ENV(qshl_u32, neon_u32, 1)
#undef NEON_FN

uint64_t HELPER(neon_qshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    if (shift >= 64) {
        if (val) {
            val = ~(uint64_t)0;
            SET_QC();
        }
    } else if (shift <= -64) {
        val = 0;
    } else if (shift < 0) {
        val >>= -shift;
    } else {
        uint64_t tmp = val;
        val <<= shift;
        if ((val >> shift) != tmp) {
            SET_QC();
            val = ~(uint64_t)0;
        }
    }
    return val;
}

#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        if (src1) { \
            SET_QC(); \
            dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
            if (src1 > 0) { \
                dest--; \
            } \
        } else { \
            dest = src1; \
        } \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> 31; \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            SET_QC(); \
            dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
            if (src1 > 0) { \
                dest--; \
            } \
        } \
    }} while (0)
NEON_VOP_ENV(qshl_s8, neon_s8, 4)
NEON_VOP_ENV(qshl_s16, neon_s16, 2)
NEON_VOP_ENV(qshl_s32, neon_s32, 1)
#undef NEON_FN

uint64_t HELPER(neon_qshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
{
    int8_t shift = (uint8_t)shiftop;
    int64_t val = valop;
    if (shift >= 64) {
        if (val) {
            SET_QC();
            val = (val >> 63) ^ ~SIGNBIT64;
        }
    } else if (shift <= -64) {
        val >>= 63;
    } else if (shift < 0) {
        val >>= -shift;
    } else {
        int64_t tmp = val;
        val <<= shift;
        if ((val >> shift) != tmp) {
            SET_QC();
            val = (tmp >> 63) ^ ~SIGNBIT64;
        }
    }
    return val;
}

#define NEON_FN(dest, src1, src2) do { \
    if (src1 & (1 << (sizeof(src1) * 8 - 1))) { \
        SET_QC(); \
        dest = 0; \
    } else { \
        int8_t tmp; \
        tmp = (int8_t)src2; \
        if (tmp >= (ssize_t)sizeof(src1) * 8) { \
            if (src1) { \
                SET_QC(); \
                dest = ~0; \
            } else { \
                dest = 0; \
            } \
        } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
            dest = 0; \
        } else if (tmp < 0) { \
            dest = src1 >> -tmp; \
        } else { \
            dest = src1 << tmp; \
            if ((dest >> tmp) != src1) { \
                SET_QC(); \
                dest = ~0; \
            } \
        } \
    }} while (0)
NEON_VOP_ENV(qshlu_s8, neon_u8, 4)
NEON_VOP_ENV(qshlu_s16, neon_u16, 2)
#undef NEON_FN

uint32_t HELPER(neon_qshlu_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop)
{
    if ((int32_t)valop < 0) {
        SET_QC();
        return 0;
    }
    return helper_neon_qshl_u32(env, valop, shiftop);
}

uint64_t HELPER(neon_qshlu_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
{
    if ((int64_t)valop < 0) {
        SET_QC();
        return 0;
    }
    return helper_neon_qshl_u64(env, valop, shiftop);
}

#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        if (src1) { \
            SET_QC(); \
            dest = ~0; \
        } else { \
            dest = 0; \
        } \
    } else if (tmp < -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> (sizeof(src1) * 8 - 1); \
    } else if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
    } else { \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            SET_QC(); \
            dest = ~0; \
        } \
    }} while (0)
NEON_VOP_ENV(qrshl_u8, neon_u8, 4)
NEON_VOP_ENV(qrshl_u16, neon_u16, 2)
#undef NEON_FN

/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64 bit accumulator.  */
uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shiftop)
{
    uint32_t dest;
    int8_t shift = (int8_t)shiftop;
    if (shift >= 32) {
        if (val) {
            SET_QC();
            dest = ~0;
        } else {
            dest = 0;
        }
    } else if (shift < -32) {
        dest = 0;
    } else if (shift == -32) {
        dest = val >> 31;
    } else if (shift < 0) {
        uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift)));
        dest = big_dest >> -shift;
    } else {
        dest = val << shift;
        if ((dest >> shift) != val) {
            SET_QC();
            dest = ~0;
        }
    }
    return dest;
}

/* Handling addition overflow with 64 bit input values is more
 * tricky than with 32 bit values.  */
uint64_t HELPER(neon_qrshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    if (shift >= 64) {
        if (val) {
            SET_QC();
            val = ~0;
        }
    } else if (shift < -64) {
        val = 0;
    } else if (shift == -64) {
        val >>= 63;
    } else if (shift < 0) {
        val >>= (-shift - 1);
        if (val == UINT64_MAX) {
            /* In this case, it means that the rounding constant is 1,
             * and the addition would overflow. Return the actual
             * result directly.  */
            val = 0x8000000000000000ULL;
        } else {
            val++;
            val >>= 1;
        }
    } else {
        uint64_t tmp = val;
        val <<= shift;
        if ((val >> shift) != tmp) {
            SET_QC();
            val = ~0;
        }
    }
    return val;
}

#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        if (src1) { \
            SET_QC(); \
            dest = (typeof(dest))(1 << (sizeof(src1) * 8 - 1)); \
            if (src1 > 0) { \
                dest--; \
            } \
        } else { \
            dest = 0; \
        } \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
    } else { \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            SET_QC(); \
            dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
            if (src1 > 0) { \
                dest--; \
            } \
        } \
    }} while (0)
NEON_VOP_ENV(qrshl_s8, neon_s8, 4)
NEON_VOP_ENV(qrshl_s16, neon_s16, 2)
#undef NEON_FN

/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64 bit accumulator.  */
uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop)
{
    int32_t dest;
    int32_t val = (int32_t)valop;
    int8_t shift = (int8_t)shiftop;
    if (shift >= 32) {
        if (val) {
            SET_QC();
            dest = (val >> 31) ^ ~SIGNBIT;
        } else {
            dest = 0;
        }
    } else if (shift <= -32) {
        dest = 0;
    } else if (shift < 0) {
        int64_t big_dest = ((int64_t)val + (1 << (-1 - shift)));
        dest = big_dest >> -shift;
    } else {
        dest = val << shift;
        if ((dest >> shift) != val) {
            SET_QC();
            dest = (val >> 31) ^ ~SIGNBIT;
        }
    }
    return dest;
}

/* Handling addition overflow with 64 bit input values is more
 * tricky than with 32 bit values.  */
uint64_t HELPER(neon_qrshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
{
    int8_t shift = (uint8_t)shiftop;
    int64_t val = valop;

    if (shift >= 64) {
        if (val) {
            SET_QC();
            val = (val >> 63) ^ ~SIGNBIT64;
        }
    } else if (shift <= -64) {
        val = 0;
    } else if (shift < 0) {
        val >>= (-shift - 1);
        if (val == INT64_MAX) {
            /* In this case, it means that the rounding constant is 1,
             * and the addition would overflow. Return the actual
             * result directly.  */
            val = 0x4000000000000000ULL;
        } else {
            val++;
            val >>= 1;
        }
    } else {
        int64_t tmp = val;
        val <<= shift;
        if ((val >> shift) != tmp) {
            SET_QC();
            val = (tmp >> 63) ^ ~SIGNBIT64;
        }
    }
    return val;
}

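/* SWAR addition: clear the top bit of each lane so carries cannot cross
 * lane boundaries, add, then patch the top bits back in with the
 * carry-less XOR sum.
 */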
uint32_t HELPER(neon_add_u8)(uint32_t a, uint32_t b)
{
    uint32_t mask;
    mask = (a ^ b) & 0x80808080u;
    a &= ~0x80808080u;
    b &= ~0x80808080u;
    return (a + b) ^ mask;
}

uint32_t HELPER(neon_add_u16)(uint32_t a, uint32_t b)
{
    uint32_t mask;
    mask = (a ^ b) & 0x80008000u;
    a &= ~0x80008000u;
    b &= ~0x80008000u;
    return (a + b) ^ mask;
}

#define NEON_FN(dest, src1, src2) dest = src1 + src2
NEON_POP(padd_u8, neon_u8, 4)
NEON_POP(padd_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = src1 - src2
NEON_VOP(sub_u8, neon_u8, 4)
NEON_VOP(sub_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = src1 * src2
NEON_VOP(mul_u8, neon_u8, 4)
NEON_VOP(mul_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = (src1 & src2) ? -1 : 0
NEON_VOP(tst_u8, neon_u8, 4)
NEON_VOP(tst_u16, neon_u16, 2)
NEON_VOP(tst_u32, neon_u32, 1)
#undef NEON_FN

/* Count Leading Sign/Zero Bits.  */
static inline int do_clz8(uint8_t x)
{
    int n;
    for (n = 8; x; n--)
        x >>= 1;
    return n;
}

static inline int do_clz16(uint16_t x)
{
    int n;
    for (n = 16; x; n--)
        x >>= 1;
    return n;
}

#define NEON_FN(dest, src, dummy) dest = do_clz8(src)
NEON_VOP1(clz_u8, neon_u8, 4)
#undef NEON_FN

#define NEON_FN(dest, src, dummy) dest = do_clz16(src)
NEON_VOP1(clz_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src, dummy) dest = do_clz8((src < 0) ? ~src : src) - 1
NEON_VOP1(cls_s8, neon_s8, 4)
#undef NEON_FN

#define NEON_FN(dest, src, dummy) dest = do_clz16((src < 0) ? ~src : src) - 1
NEON_VOP1(cls_s16, neon_s16, 2)
#undef NEON_FN

uint32_t HELPER(neon_cls_s32)(uint32_t x)
{
    int count;
    if ((int32_t)x < 0)
        x = ~x;
    for (count = 32; x; count--)
        x = x >> 1;
    return count - 1;
}

/* Bit count.  */
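/* Classic parallel popcount, stopping once each byte holds its own count. */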
uint32_t HELPER(neon_cnt_u8)(uint32_t x)
{
    x = (x & 0x55555555) + ((x >>  1) & 0x55555555);
    x = (x & 0x33333333) + ((x >>  2) & 0x33333333);
    x = (x & 0x0f0f0f0f) + ((x >>  4) & 0x0f0f0f0f);
    return x;
}

/* Reverse bits in each 8 bit word */
uint32_t HELPER(neon_rbit_u8)(uint32_t x)
{
    x =  ((x & 0xf0f0f0f0) >> 4)
       | ((x & 0x0f0f0f0f) << 4);
    x =  ((x & 0x88888888) >> 3)
       | ((x & 0x44444444) >> 1)
       | ((x & 0x22222222) << 1)
       | ((x & 0x11111111) << 3);
    return x;
}

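/* Saturating doubling multiply, returning the high half.  (tmp ^
 * (tmp << 1)) has the sign bit set exactly when the top two bits of tmp
 * differ, i.e. when doubling the product would overflow (which only
 * happens for -2^(n-1) squared).
 */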
#define NEON_QDMULH16(dest, src1, src2, round) do { \
    uint32_t tmp = (int32_t)(int16_t) src1 * (int16_t) src2; \
    if ((tmp ^ (tmp << 1)) & SIGNBIT) { \
        SET_QC(); \
        tmp = (tmp >> 31) ^ ~SIGNBIT; \
    } else { \
        tmp <<= 1; \
    } \
    if (round) { \
        int32_t old = tmp; \
        tmp += 1 << 15; \
        if ((int32_t)tmp < old) { \
            SET_QC(); \
            tmp = SIGNBIT - 1; \
        } \
    } \
    dest = tmp >> 16; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 0)
NEON_VOP_ENV(qdmulh_s16, neon_s16, 2)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 1)
NEON_VOP_ENV(qrdmulh_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_QDMULH16

#define NEON_QDMULH32(dest, src1, src2, round) do { \
    uint64_t tmp = (int64_t)(int32_t) src1 * (int32_t) src2; \
    if ((tmp ^ (tmp << 1)) & SIGNBIT64) { \
        SET_QC(); \
        tmp = (tmp >> 63) ^ ~SIGNBIT64; \
    } else { \
        tmp <<= 1; \
    } \
    if (round) { \
        int64_t old = tmp; \
        tmp += (int64_t)1 << 31; \
        if ((int64_t)tmp < old) { \
            SET_QC(); \
            tmp = SIGNBIT64 - 1; \
        } \
    } \
    dest = tmp >> 32; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 0)
NEON_VOP_ENV(qdmulh_s32, neon_s32, 1)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 1)
NEON_VOP_ENV(qrdmulh_s32, neon_s32, 1)
#undef NEON_FN
#undef NEON_QDMULH32

uint32_t HELPER(neon_narrow_u8)(uint64_t x)
{
    return (x & 0xffu) | ((x >> 8) & 0xff00u) | ((x >> 16) & 0xff0000u)
           | ((x >> 24) & 0xff000000u);
}

uint32_t HELPER(neon_narrow_u16)(uint64_t x)
{
    return (x & 0xffffu) | ((x >> 16) & 0xffff0000u);
}

uint32_t HELPER(neon_narrow_high_u8)(uint64_t x)
{
    return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
            | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
}

uint32_t HELPER(neon_narrow_high_u16)(uint64_t x)
{
    return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
}

uint32_t HELPER(neon_narrow_round_high_u8)(uint64_t x)
{
    x &= 0xff80ff80ff80ff80ull;
    x += 0x0080008000800080ull;
    return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
            | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
}

uint32_t HELPER(neon_narrow_round_high_u16)(uint64_t x)
{
    x &= 0xffff8000ffff8000ull;
    x += 0x0000800000008000ull;
    return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
}

uint32_t HELPER(neon_unarrow_sat8)(CPUARMState *env, uint64_t x)
{
    uint16_t s;
    uint8_t d;
    uint32_t res = 0;
#define SAT8(n) \
    s = x >> n; \
    if (s & 0x8000) { \
        SET_QC(); \
    } else { \
        if (s > 0xff) { \
            d = 0xff; \
            SET_QC(); \
        } else  { \
            d = s; \
        } \
        res |= (uint32_t)d << (n / 2); \
    }

    SAT8(0);
    SAT8(16);
    SAT8(32);
    SAT8(48);
#undef SAT8
    return res;
}

uint32_t HELPER(neon_narrow_sat_u8)(CPUARMState *env, uint64_t x)
{
    uint16_t s;
    uint8_t d;
    uint32_t res = 0;
#define SAT8(n) \
    s = x >> n; \
    if (s > 0xff) { \
        d = 0xff; \
        SET_QC(); \
    } else  { \
        d = s; \
    } \
    res |= (uint32_t)d << (n / 2);

    SAT8(0);
    SAT8(16);
    SAT8(32);
    SAT8(48);
#undef SAT8
    return res;
}

uint32_t HELPER(neon_narrow_sat_s8)(CPUARMState *env, uint64_t x)
{
    int16_t s;
    uint8_t d;
    uint32_t res = 0;
#define SAT8(n) \
    s = x >> n; \
    if (s != (int8_t)s) { \
        d = (s >> 15) ^ 0x7f; \
        SET_QC(); \
    } else  { \
        d = s; \
    } \
    res |= (uint32_t)d << (n / 2);

    SAT8(0);
    SAT8(16);
    SAT8(32);
    SAT8(48);
#undef SAT8
    return res;
}

uint32_t HELPER(neon_unarrow_sat16)(CPUARMState *env, uint64_t x)
{
    uint32_t high;
    uint32_t low;
    low = x;
    if (low & 0x80000000) {
        low = 0;
        SET_QC();
    } else if (low > 0xffff) {
        low = 0xffff;
        SET_QC();
    }
    high = x >> 32;
    if (high & 0x80000000) {
        high = 0;
        SET_QC();
    } else if (high > 0xffff) {
        high = 0xffff;
        SET_QC();
    }
    return low | (high << 16);
}

uint32_t HELPER(neon_narrow_sat_u16)(CPUARMState *env, uint64_t x)
{
    uint32_t high;
    uint32_t low;
    low = x;
    if (low > 0xffff) {
        low = 0xffff;
        SET_QC();
    }
    high = x >> 32;
    if (high > 0xffff) {
        high = 0xffff;
        SET_QC();
    }
    return low | (high << 16);
}

uint32_t HELPER(neon_narrow_sat_s16)(CPUARMState *env, uint64_t x)
{
    int32_t low;
    int32_t high;
    low = x;
    if (low != (int16_t)low) {
        low = (low >> 31) ^ 0x7fff;
        SET_QC();
    }
    high = x >> 32;
    if (high != (int16_t)high) {
        high = (high >> 31) ^ 0x7fff;
        SET_QC();
    }
    return (uint16_t)low | (high << 16);
}

uint32_t HELPER(neon_unarrow_sat32)(CPUARMState *env, uint64_t x)
{
    if (x & 0x8000000000000000ull) {
        SET_QC();
        return 0;
    }
    if (x > 0xffffffffu) {
        SET_QC();
        return 0xffffffffu;
    }
    return x;
}

uint32_t HELPER(neon_narrow_sat_u32)(CPUARMState *env, uint64_t x)
{
    if (x > 0xffffffffu) {
        SET_QC();
        return 0xffffffffu;
    }
    return x;
}

uint32_t HELPER(neon_narrow_sat_s32)(CPUARMState *env, uint64_t x)
{
    if ((int64_t)x != (int32_t)x) {
        SET_QC();
        return ((int64_t)x >> 63) ^ 0x7fffffff;
    }
    return x;
}

uint64_t HELPER(neon_widen_u8)(uint32_t x)
{
    uint64_t tmp;
    uint64_t ret;
    ret = (uint8_t)x;
    tmp = (uint8_t)(x >> 8);
    ret |= tmp << 16;
    tmp = (uint8_t)(x >> 16);
    ret |= tmp << 32;
    tmp = (uint8_t)(x >> 24);
    ret |= tmp << 48;
    return ret;
}

uint64_t HELPER(neon_widen_s8)(uint32_t x)
{
    uint64_t tmp;
    uint64_t ret;
    ret = (uint16_t)(int8_t)x;
    tmp = (uint16_t)(int8_t)(x >> 8);
    ret |= tmp << 16;
    tmp = (uint16_t)(int8_t)(x >> 16);
    ret |= tmp << 32;
    tmp = (uint16_t)(int8_t)(x >> 24);
    ret |= tmp << 48;
    return ret;
}

uint64_t HELPER(neon_widen_u16)(uint32_t x)
{
    uint64_t high = (uint16_t)(x >> 16);
    return ((uint16_t)x) | (high << 32);
}

uint64_t HELPER(neon_widen_s16)(uint32_t x)
{
    uint64_t high = (int16_t)(x >> 16);
    return ((uint32_t)(int16_t)x) | (high << 32);
}

uint64_t HELPER(neon_addl_u16)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ b) & 0x8000800080008000ull;
    a &= ~0x8000800080008000ull;
    b &= ~0x8000800080008000ull;
    return (a + b) ^ mask;
}

uint64_t HELPER(neon_addl_u32)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ b) & 0x8000000080000000ull;
    a &= ~0x8000000080000000ull;
    b &= ~0x8000000080000000ull;
    return (a + b) ^ mask;
}

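/* Add adjacent 16-bit lanes of each operand; the truncated pair sums
 * from 'a' fill the low half of the result and those from 'b' the high
 * half.  The intermediate 32-bit slots of tmp/tmp2 absorb the 17-bit
 * pair sums before the final masking.
 */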
uint64_t HELPER(neon_paddl_u16)(uint64_t a, uint64_t b)
{
    uint64_t tmp;
    uint64_t tmp2;

    tmp = a & 0x0000ffff0000ffffull;
    tmp += (a >> 16) & 0x0000ffff0000ffffull;
    tmp2 = b & 0xffff0000ffff0000ull;
    tmp2 += (b << 16) & 0xffff0000ffff0000ull;
    return    ( tmp         & 0xffff)
            | ((tmp  >> 16) & 0xffff0000ull)
            | ((tmp2 << 16) & 0xffff00000000ull)
            | ( tmp2        & 0xffff000000000000ull);
}

uint64_t HELPER(neon_paddl_u32)(uint64_t a, uint64_t b)
{
    uint32_t low = a + (a >> 32);
    uint32_t high = b + (b >> 32);
    return low + ((uint64_t)high << 32);
}

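/* SWAR subtraction: force the top bit of each 'a' lane and clear it in
 * each 'b' lane so borrows cannot cross lanes, then restore the true top
 * bits via the borrow-less XOR difference mask.
 */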
uint64_t HELPER(neon_subl_u16)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ ~b) & 0x8000800080008000ull;
    a |= 0x8000800080008000ull;
    b &= ~0x8000800080008000ull;
    return (a - b) ^ mask;
}

uint64_t HELPER(neon_subl_u32)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ ~b) & 0x8000000080000000ull;
    a |= 0x8000000080000000ull;
    b &= ~0x8000000080000000ull;
    return (a - b) ^ mask;
}

uint64_t HELPER(neon_addl_saturate_s32)(CPUARMState *env, uint64_t a, uint64_t b)
{
    uint32_t x, y;
    uint32_t low, high;

    x = a;
    y = b;
    low = x + y;
    if (((low ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
        SET_QC();
        low = ((int32_t)x >> 31) ^ ~SIGNBIT;
    }
    x = a >> 32;
    y = b >> 32;
    high = x + y;
    if (((high ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
        SET_QC();
        high = ((int32_t)x >> 31) ^ ~SIGNBIT;
    }
    return low | ((uint64_t)high << 32);
}

uint64_t HELPER(neon_addl_saturate_s64)(CPUARMState *env, uint64_t a, uint64_t b)
{
    uint64_t result;

    result = a + b;
    if (((result ^ a) & SIGNBIT64) && !((a ^ b) & SIGNBIT64)) {
        SET_QC();
        result = ((int64_t)a >> 63) ^ ~SIGNBIT64;
    }
    return result;
}

/* We have to do the arithmetic in a larger type than
 * the input type, because for example with a signed 32 bit
 * op the absolute difference can overflow a signed 32 bit value.
 */
#define DO_ABD(dest, x, y, intype, arithtype) do {            \
    arithtype tmp_x = (intype)(x);                            \
    arithtype tmp_y = (intype)(y);                            \
    dest = ((tmp_x > tmp_y) ? tmp_x - tmp_y : tmp_y - tmp_x); \
    } while(0)

uint64_t HELPER(neon_abdl_u16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_ABD(result, a, b, uint8_t, uint32_t);
    DO_ABD(tmp, a >> 8, b >> 8, uint8_t, uint32_t);
    result |= tmp << 16;
    DO_ABD(tmp, a >> 16, b >> 16, uint8_t, uint32_t);
    result |= tmp << 32;
    DO_ABD(tmp, a >> 24, b >> 24, uint8_t, uint32_t);
    result |= tmp << 48;
    return result;
}

uint64_t HELPER(neon_abdl_s16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_ABD(result, a, b, int8_t, int32_t);
    DO_ABD(tmp, a >> 8, b >> 8, int8_t, int32_t);
    result |= tmp << 16;
    DO_ABD(tmp, a >> 16, b >> 16, int8_t, int32_t);
    result |= tmp << 32;
    DO_ABD(tmp, a >> 24, b >> 24, int8_t, int32_t);
    result |= tmp << 48;
    return result;
}

uint64_t HELPER(neon_abdl_u32)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_ABD(result, a, b, uint16_t, uint32_t);
    DO_ABD(tmp, a >> 16, b >> 16, uint16_t, uint32_t);
    return result | (tmp << 32);
}

uint64_t HELPER(neon_abdl_s32)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_ABD(result, a, b, int16_t, int32_t);
    DO_ABD(tmp, a >> 16, b >> 16, int16_t, int32_t);
    return result | (tmp << 32);
}

uint64_t HELPER(neon_abdl_u64)(uint32_t a, uint32_t b)
{
    uint64_t result;
    DO_ABD(result, a, b, uint32_t, uint64_t);
    return result;
}

uint64_t HELPER(neon_abdl_s64)(uint32_t a, uint32_t b)
{
    uint64_t result;
    DO_ABD(result, a, b, int32_t, int64_t);
    return result;
}
#undef DO_ABD

/* Widening multiply. Named type is the source type.  */
#define DO_MULL(dest, x, y, type1, type2) do { \
    type1 tmp_x = x; \
    type1 tmp_y = y; \
    dest = (type2)((type2)tmp_x * (type2)tmp_y); \
    } while(0)

uint64_t HELPER(neon_mull_u8)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_MULL(result, a, b, uint8_t, uint16_t);
    DO_MULL(tmp, a >> 8, b >> 8, uint8_t, uint16_t);
    result |= tmp << 16;
    DO_MULL(tmp, a >> 16, b >> 16, uint8_t, uint16_t);
    result |= tmp << 32;
    DO_MULL(tmp, a >> 24, b >> 24, uint8_t, uint16_t);
    result |= tmp << 48;
    return result;
}

uint64_t HELPER(neon_mull_s8)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_MULL(result, a, b, int8_t, uint16_t);
    DO_MULL(tmp, a >> 8, b >> 8, int8_t, uint16_t);
    result |= tmp << 16;
    DO_MULL(tmp, a >> 16, b >> 16, int8_t, uint16_t);
    result |= tmp << 32;
    DO_MULL(tmp, a >> 24, b >> 24, int8_t, uint16_t);
    result |= tmp << 48;
    return result;
}

uint64_t HELPER(neon_mull_u16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_MULL(result, a, b, uint16_t, uint32_t);
    DO_MULL(tmp, a >> 16, b >> 16, uint16_t, uint32_t);
    return result | (tmp << 32);
}

uint64_t HELPER(neon_mull_s16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_MULL(result, a, b, int16_t, uint32_t);
    DO_MULL(tmp, a >> 16, b >> 16, int16_t, uint32_t);
    return result | (tmp << 32);
}

uint64_t HELPER(neon_negl_u16)(uint64_t x)
{
    uint16_t tmp;
    uint64_t result;
    result = (uint16_t)-x;
    tmp = -(x >> 16);
    result |= (uint64_t)tmp << 16;
    tmp = -(x >> 32);
    result |= (uint64_t)tmp << 32;
    tmp = -(x >> 48);
    result |= (uint64_t)tmp << 48;
    return result;
}

uint64_t HELPER(neon_negl_u32)(uint64_t x)
{
    uint32_t low = -x;
    uint32_t high = -(x >> 32);
    return low | ((uint64_t)high << 32);
}

/* Saturating sign manipulation.  */
/* ??? Make these use NEON_VOP1 */
#define DO_QABS8(x) do { \
    if (x == (int8_t)0x80) { \
        x = 0x7f; \
        SET_QC(); \
    } else if (x < 0) { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qabs_s8)(CPUARMState *env, uint32_t x)
{
    neon_s8 vec;
    NEON_UNPACK(neon_s8, vec, x);
    DO_QABS8(vec.v1);
    DO_QABS8(vec.v2);
    DO_QABS8(vec.v3);
    DO_QABS8(vec.v4);
    NEON_PACK(neon_s8, x, vec);
    return x;
}
#undef DO_QABS8

#define DO_QNEG8(x) do { \
    if (x == (int8_t)0x80) { \
        x = 0x7f; \
        SET_QC(); \
    } else { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qneg_s8)(CPUARMState *env, uint32_t x)
{
    neon_s8 vec;
    NEON_UNPACK(neon_s8, vec, x);
    DO_QNEG8(vec.v1);
    DO_QNEG8(vec.v2);
    DO_QNEG8(vec.v3);
    DO_QNEG8(vec.v4);
    NEON_PACK(neon_s8, x, vec);
    return x;
}
#undef DO_QNEG8

#define DO_QABS16(x) do { \
    if (x == (int16_t)0x8000) { \
        x = 0x7fff; \
        SET_QC(); \
    } else if (x < 0) { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qabs_s16)(CPUARMState *env, uint32_t x)
{
    neon_s16 vec;
    NEON_UNPACK(neon_s16, vec, x);
    DO_QABS16(vec.v1);
    DO_QABS16(vec.v2);
    NEON_PACK(neon_s16, x, vec);
    return x;
}
#undef DO_QABS16

#define DO_QNEG16(x) do { \
    if (x == (int16_t)0x8000) { \
        x = 0x7fff; \
        SET_QC(); \
    } else { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qneg_s16)(CPUARMState *env, uint32_t x)
{
    neon_s16 vec;
    NEON_UNPACK(neon_s16, vec, x);
    DO_QNEG16(vec.v1);
    DO_QNEG16(vec.v2);
    NEON_PACK(neon_s16, x, vec);
    return x;
}
#undef DO_QNEG16

uint32_t HELPER(neon_qabs_s32)(CPUARMState *env, uint32_t x)
{
    if (x == SIGNBIT) {
        SET_QC();
        x = ~SIGNBIT;
    } else if ((int32_t)x < 0) {
        x = -x;
    }
    return x;
}

uint32_t HELPER(neon_qneg_s32)(CPUARMState *env, uint32_t x)
{
    if (x == SIGNBIT) {
        SET_QC();
        x = ~SIGNBIT;
    } else {
        x = -x;
    }
    return x;
}

uint64_t HELPER(neon_qabs_s64)(CPUARMState *env, uint64_t x)
{
    if (x == SIGNBIT64) {
        SET_QC();
        x = ~SIGNBIT64;
    } else if ((int64_t)x < 0) {
        x = -x;
    }
    return x;
}

uint64_t HELPER(neon_qneg_s64)(CPUARMState *env, uint64_t x)
{
    if (x == SIGNBIT64) {
        SET_QC();
        x = ~SIGNBIT64;
    } else {
        x = -x;
    }
    return x;
}

/* NEON Float helpers.  */

/* Floating point comparisons produce an integer result.
 * Note that EQ doesn't signal InvalidOp for QNaNs but GE and GT do.
 * Softfloat routines return 0/1, which we convert to the 0/-1 Neon requires.
 */
uint32_t HELPER(neon_ceq_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float32_eq_quiet(make_float32(a), make_float32(b), fpst);
}

uint32_t HELPER(neon_cge_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float32_le(make_float32(b), make_float32(a), fpst);
}

uint32_t HELPER(neon_cgt_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float32_lt(make_float32(b), make_float32(a), fpst);
}

uint32_t HELPER(neon_acge_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f0 = float32_abs(make_float32(a));
    float32 f1 = float32_abs(make_float32(b));
    return -float32_le(f1, f0, fpst);
}

uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f0 = float32_abs(make_float32(a));
    float32 f1 = float32_abs(make_float32(b));
    return -float32_lt(f1, f0, fpst);
}

uint64_t HELPER(neon_acge_f64)(uint64_t a, uint64_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f0 = float64_abs(make_float64(a));
    float64 f1 = float64_abs(make_float64(b));
    return -float64_le(f1, f0, fpst);
}

uint64_t HELPER(neon_acgt_f64)(uint64_t a, uint64_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f0 = float64_abs(make_float64(a));
    float64 f1 = float64_abs(make_float64(b));
    return -float64_lt(f1, f0, fpst);
}

#define ELEM(V, N, SIZE) (((V) >> ((N) * (SIZE))) & ((1ull << (SIZE)) - 1))

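/* ELEM extracts lane N of width SIZE bits from a 64-bit value; the
 * zip/unzip helpers below are pure lane shuffles built from it.
 */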
void HELPER(neon_qunzip8)(void *vd, void *vm)
{
    uint64_t *rd = vd, *rm = vm;
    uint64_t zd0 = rd[0], zd1 = rd[1];
    uint64_t zm0 = rm[0], zm1 = rm[1];

    uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zd0, 2, 8) << 8)
        | (ELEM(zd0, 4, 8) << 16) | (ELEM(zd0, 6, 8) << 24)
        | (ELEM(zd1, 0, 8) << 32) | (ELEM(zd1, 2, 8) << 40)
        | (ELEM(zd1, 4, 8) << 48) | (ELEM(zd1, 6, 8) << 56);
    uint64_t d1 = ELEM(zm0, 0, 8) | (ELEM(zm0, 2, 8) << 8)
        | (ELEM(zm0, 4, 8) << 16) | (ELEM(zm0, 6, 8) << 24)
        | (ELEM(zm1, 0, 8) << 32) | (ELEM(zm1, 2, 8) << 40)
        | (ELEM(zm1, 4, 8) << 48) | (ELEM(zm1, 6, 8) << 56);
    uint64_t m0 = ELEM(zd0, 1, 8) | (ELEM(zd0, 3, 8) << 8)
        | (ELEM(zd0, 5, 8) << 16) | (ELEM(zd0, 7, 8) << 24)
        | (ELEM(zd1, 1, 8) << 32) | (ELEM(zd1, 3, 8) << 40)
        | (ELEM(zd1, 5, 8) << 48) | (ELEM(zd1, 7, 8) << 56);
    uint64_t m1 = ELEM(zm0, 1, 8) | (ELEM(zm0, 3, 8) << 8)
        | (ELEM(zm0, 5, 8) << 16) | (ELEM(zm0, 7, 8) << 24)
        | (ELEM(zm1, 1, 8) << 32) | (ELEM(zm1, 3, 8) << 40)
        | (ELEM(zm1, 5, 8) << 48) | (ELEM(zm1, 7, 8) << 56);

    rm[0] = m0;
    rm[1] = m1;
    rd[0] = d0;
    rd[1] = d1;
}

void HELPER(neon_qunzip16)(void *vd, void *vm)
{
    uint64_t *rd = vd, *rm = vm;
    uint64_t zd0 = rd[0], zd1 = rd[1];
    uint64_t zm0 = rm[0], zm1 = rm[1];

    uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zd0, 2, 16) << 16)
        | (ELEM(zd1, 0, 16) << 32) | (ELEM(zd1, 2, 16) << 48);
    uint64_t d1 = ELEM(zm0, 0, 16) | (ELEM(zm0, 2, 16) << 16)
        | (ELEM(zm1, 0, 16) << 32) | (ELEM(zm1, 2, 16) << 48);
    uint64_t m0 = ELEM(zd0, 1, 16) | (ELEM(zd0, 3, 16) << 16)
        | (ELEM(zd1, 1, 16) << 32) | (ELEM(zd1, 3, 16) << 48);
    uint64_t m1 = ELEM(zm0, 1, 16) | (ELEM(zm0, 3, 16) << 16)
        | (ELEM(zm1, 1, 16) << 32) | (ELEM(zm1, 3, 16) << 48);

    rm[0] = m0;
    rm[1] = m1;
    rd[0] = d0;
    rd[1] = d1;
}

void HELPER(neon_qunzip32)(void *vd, void *vm)
{
    uint64_t *rd = vd, *rm = vm;
    uint64_t zd0 = rd[0], zd1 = rd[1];
    uint64_t zm0 = rm[0], zm1 = rm[1];

    uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zd1, 0, 32) << 32);
    uint64_t d1 = ELEM(zm0, 0, 32) | (ELEM(zm1, 0, 32) << 32);
    uint64_t m0 = ELEM(zd0, 1, 32) | (ELEM(zd1, 1, 32) << 32);
    uint64_t m1 = ELEM(zm0, 1, 32) | (ELEM(zm1, 1, 32) << 32);

    rm[0] = m0;
    rm[1] = m1;
    rd[0] = d0;
    rd[1] = d1;
}

void HELPER(neon_unzip8)(void *vd, void *vm)
{
    uint64_t *rd = vd, *rm = vm;
    uint64_t zd = rd[0], zm = rm[0];

    uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zd, 2, 8) << 8)
        | (ELEM(zd, 4, 8) << 16) | (ELEM(zd, 6, 8) << 24)
        | (ELEM(zm, 0, 8) << 32) | (ELEM(zm, 2, 8) << 40)
        | (ELEM(zm, 4, 8) << 48) | (ELEM(zm, 6, 8) << 56);
    uint64_t m0 = ELEM(zd, 1, 8) | (ELEM(zd, 3, 8) << 8)
        | (ELEM(zd, 5, 8) << 16) | (ELEM(zd, 7, 8) << 24)
        | (ELEM(zm, 1, 8) << 32) | (ELEM(zm, 3, 8) << 40)
        | (ELEM(zm, 5, 8) << 48) | (ELEM(zm, 7, 8) << 56);

    rm[0] = m0;
    rd[0] = d0;
}

void HELPER(neon_unzip16)(void *vd, void *vm)
{
    uint64_t *rd = vd, *rm = vm;
    uint64_t zd = rd[0], zm = rm[0];

    uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zd, 2, 16) << 16)
        | (ELEM(zm, 0, 16) << 32) | (ELEM(zm, 2, 16) << 48);
    uint64_t m0 = ELEM(zd, 1, 16) | (ELEM(zd, 3, 16) << 16)
        | (ELEM(zm, 1, 16) << 32) | (ELEM(zm, 3, 16) << 48);

    rm[0] = m0;
    rd[0] = d0;
}

void HELPER(neon_qzip8)(void *vd, void *vm)
{
    uint64_t *rd = vd, *rm = vm;
    uint64_t zd0 = rd[0], zd1 = rd[1];
    uint64_t zm0 = rm[0], zm1 = rm[1];

    uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zm0, 0, 8) << 8)
        | (ELEM(zd0, 1, 8) << 16) | (ELEM(zm0, 1, 8) << 24)
        | (ELEM(zd0, 2, 8) << 32) | (ELEM(zm0, 2, 8) << 40)
        | (ELEM(zd0, 3, 8) << 48) | (ELEM(zm0, 3, 8) << 56);
    uint64_t d1 = ELEM(zd0, 4, 8) | (ELEM(zm0, 4, 8) << 8)
        | (ELEM(zd0, 5, 8) << 16) | (ELEM(zm0, 5, 8) << 24)
        | (ELEM(zd0, 6, 8) << 32) | (ELEM(zm0, 6, 8) << 40)
        | (ELEM(zd0, 7, 8) << 48) | (ELEM(zm0, 7, 8) << 56);
    uint64_t m0 = ELEM(zd1, 0, 8) | (ELEM(zm1, 0, 8) << 8)
        | (ELEM(zd1, 1, 8) << 16) | (ELEM(zm1, 1, 8) << 24)
        | (ELEM(zd1, 2, 8) << 32) | (ELEM(zm1, 2, 8) << 40)
        | (ELEM(zd1, 3, 8) << 48) | (ELEM(zm1, 3, 8) << 56);
    uint64_t m1 = ELEM(zd1, 4, 8) | (ELEM(zm1, 4, 8) << 8)
        | (ELEM(zd1, 5, 8) << 16) | (ELEM(zm1, 5, 8) << 24)
        | (ELEM(zd1, 6, 8) << 32) | (ELEM(zm1, 6, 8) << 40)
        | (ELEM(zd1, 7, 8) << 48) | (ELEM(zm1, 7, 8) << 56);

    rm[0] = m0;
    rm[1] = m1;
    rd[0] = d0;
    rd[1] = d1;
}

void HELPER(neon_qzip16)(void *vd, void *vm)
{
    uint64_t *rd = vd, *rm = vm;
    uint64_t zd0 = rd[0], zd1 = rd[1];
    uint64_t zm0 = rm[0], zm1 = rm[1];

    uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zm0, 0, 16) << 16)
        | (ELEM(zd0, 1, 16) << 32) | (ELEM(zm0, 1, 16) << 48);
    uint64_t d1 = ELEM(zd0, 2, 16) | (ELEM(zm0, 2, 16) << 16)
        | (ELEM(zd0, 3, 16) << 32) | (ELEM(zm0, 3, 16) << 48);
    uint64_t m0 = ELEM(zd1, 0, 16) | (ELEM(zm1, 0, 16) << 16)
        | (ELEM(zd1, 1, 16) << 32) | (ELEM(zm1, 1, 16) << 48);
    uint64_t m1 = ELEM(zd1, 2, 16) | (ELEM(zm1, 2, 16) << 16)
        | (ELEM(zd1, 3, 16) << 32) | (ELEM(zm1, 3, 16) << 48);

    rm[0] = m0;
    rm[1] = m1;
    rd[0] = d0;
    rd[1] = d1;
}

void HELPER(neon_qzip32)(void *vd, void *vm)
{
    uint64_t *rd = vd, *rm = vm;
    uint64_t zd0 = rd[0], zd1 = rd[1];
    uint64_t zm0 = rm[0], zm1 = rm[1];

    uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zm0, 0, 32) << 32);
    uint64_t d1 = ELEM(zd0, 1, 32) | (ELEM(zm0, 1, 32) << 32);
    uint64_t m0 = ELEM(zd1, 0, 32) | (ELEM(zm1, 0, 32) << 32);
    uint64_t m1 = ELEM(zd1, 1, 32) | (ELEM(zm1, 1, 32) << 32);

    rm[0] = m0;
    rm[1] = m1;
    rd[0] = d0;
    rd[1] = d1;
}

void HELPER(neon_zip8)(void *vd, void *vm)
{
    uint64_t *rd = vd, *rm = vm;
    uint64_t zd = rd[0], zm = rm[0];

    uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zm, 0, 8) << 8)
        | (ELEM(zd, 1, 8) << 16) | (ELEM(zm, 1, 8) << 24)
        | (ELEM(zd, 2, 8) << 32) | (ELEM(zm, 2, 8) << 40)
        | (ELEM(zd, 3, 8) << 48) | (ELEM(zm, 3, 8) << 56);
    uint64_t m0 = ELEM(zd, 4, 8) | (ELEM(zm, 4, 8) << 8)
        | (ELEM(zd, 5, 8) << 16) | (ELEM(zm, 5, 8) << 24)
        | (ELEM(zd, 6, 8) << 32) | (ELEM(zm, 6, 8) << 40)
        | (ELEM(zd, 7, 8) << 48) | (ELEM(zm, 7, 8) << 56);

    rm[0] = m0;
    rd[0] = d0;
}

void HELPER(neon_zip16)(void *vd, void *vm)
{
    uint64_t *rd = vd, *rm = vm;
    uint64_t zd = rd[0], zm = rm[0];

    uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zm, 0, 16) << 16)
        | (ELEM(zd, 1, 16) << 32) | (ELEM(zm, 1, 16) << 48);
    uint64_t m0 = ELEM(zd, 2, 16) | (ELEM(zm, 2, 16) << 16)
        | (ELEM(zd, 3, 16) << 32) | (ELEM(zm, 3, 16) << 48);

    rm[0] = m0;
    rd[0] = d0;
}