/* qemu/target/ppc/fpu_helper.c */
   1/*
   2 *  PowerPC floating point and SPE emulation helpers for QEMU.
   3 *
   4 *  Copyright (c) 2003-2007 Jocelyn Mayer
   5 *
   6 * This library is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU Lesser General Public
   8 * License as published by the Free Software Foundation; either
   9 * version 2 of the License, or (at your option) any later version.
  10 *
  11 * This library is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14 * Lesser General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU Lesser General Public
  17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  18 */
  19#include "qemu/osdep.h"
  20#include "cpu.h"
  21#include "exec/helper-proto.h"
  22#include "exec/exec-all.h"
  23#include "internal.h"
  24#include "fpu/softfloat.h"
  25
  26static inline float128 float128_snan_to_qnan(float128 x)
  27{
  28    float128 r;
  29
  30    r.high = x.high | 0x0000800000000000;
  31    r.low = x.low;
  32    return r;
  33}
  34
/* Quiet an sNaN by setting the msb of the fraction for each format.  */
#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
#define float16_snan_to_qnan(x) ((x) | 0x0200)
  38
  39static inline bool fp_exceptions_enabled(CPUPPCState *env)
  40{
  41#ifdef CONFIG_USER_ONLY
  42    return true;
  43#else
  44    return (env->msr & ((1U << MSR_FE0) | (1U << MSR_FE1))) != 0;
  45#endif
  46}
  47
  48/*****************************************************************************/
  49/* Floating point operations helpers */
  50
  51/*
  52 * This is the non-arithmatic conversion that happens e.g. on loads.
  53 * In the Power ISA pseudocode, this is called DOUBLE.
  54 */
  55uint64_t helper_todouble(uint32_t arg)
  56{
  57    uint32_t abs_arg = arg & 0x7fffffff;
  58    uint64_t ret;
  59
  60    if (likely(abs_arg >= 0x00800000)) {
  61        /* Normalized operand, or Inf, or NaN.  */
  62        ret  = (uint64_t)extract32(arg, 30, 2) << 62;
  63        ret |= ((extract32(arg, 30, 1) ^ 1) * (uint64_t)7) << 59;
  64        ret |= (uint64_t)extract32(arg, 0, 30) << 29;
  65    } else {
  66        /* Zero or Denormalized operand.  */
  67        ret = (uint64_t)extract32(arg, 31, 1) << 63;
  68        if (unlikely(abs_arg != 0)) {
  69            /* Denormalized operand.  */
  70            int shift = clz32(abs_arg) - 9;
  71            int exp = -126 - shift + 1023;
  72            ret |= (uint64_t)exp << 52;
  73            ret |= abs_arg << (shift + 29);
  74        }
  75    }
  76    return ret;
  77}
  78
  79/*
  80 * This is the non-arithmatic conversion that happens e.g. on stores.
  81 * In the Power ISA pseudocode, this is called SINGLE.
  82 */
  83uint32_t helper_tosingle(uint64_t arg)
  84{
  85    int exp = extract64(arg, 52, 11);
  86    uint32_t ret;
  87
  88    if (likely(exp > 896)) {
  89        /* No denormalization required (includes Inf, NaN).  */
  90        ret  = extract64(arg, 62, 2) << 30;
  91        ret |= extract64(arg, 29, 30);
  92    } else {
  93        /* Zero or Denormal result.  If the exponent is in bounds for
  94         * a single-precision denormal result, extract the proper bits.
  95         * If the input is not zero, and the exponent is out of bounds,
  96         * then the result is undefined; this underflows to zero.
  97         */
  98        ret = extract64(arg, 63, 1) << 31;
  99        if (unlikely(exp >= 874)) {
 100            /* Denormal result.  */
 101            ret |= ((1ULL << 52) | extract64(arg, 0, 52)) >> (896 + 30 - exp);
 102        }
 103    }
 104    return ret;
 105}
 106
 107static inline int ppc_float32_get_unbiased_exp(float32 f)
 108{
 109    return ((f >> 23) & 0xFF) - 127;
 110}
 111
 112static inline int ppc_float64_get_unbiased_exp(float64 f)
 113{
 114    return ((f >> 52) & 0x7FF) - 1023;
 115}
 116
/* Classify a floating-point number.  The values are bit flags so the
 * classifications of several operands can be OR'ed together.  */
enum {
    is_normal   = 1,
    is_zero     = 2,
    is_denormal = 4,
    is_inf      = 8,
    is_qnan     = 16,
    is_snan     = 32,
    is_neg      = 64,
};
 127
/*
 * Generate a <tp>_classify() helper returning the is_* flags above for
 * one operand: the sign flag plus exactly one class flag.  The dummy
 * float_status exists only because *_is_signaling_nan() takes one to
 * read snan_bit_is_one, which is 0 for PowerPC.
 */
#define COMPUTE_CLASS(tp)                                      \
static int tp##_classify(tp arg)                               \
{                                                              \
    int ret = tp##_is_neg(arg) * is_neg;                       \
    if (unlikely(tp##_is_any_nan(arg))) {                      \
        float_status dummy = { };  /* snan_bit_is_one = 0 */   \
        ret |= (tp##_is_signaling_nan(arg, &dummy)             \
                ? is_snan : is_qnan);                          \
    } else if (unlikely(tp##_is_infinity(arg))) {              \
        ret |= is_inf;                                         \
    } else if (tp##_is_zero(arg)) {                            \
        ret |= is_zero;                                        \
    } else if (tp##_is_zero_or_denormal(arg)) {                \
        ret |= is_denormal;                                    \
    } else {                                                   \
        ret |= is_normal;                                      \
    }                                                          \
    return ret;                                                \
}

COMPUTE_CLASS(float16)
COMPUTE_CLASS(float32)
COMPUTE_CLASS(float64)
COMPUTE_CLASS(float128)
 152
 153static void set_fprf_from_class(CPUPPCState *env, int class)
 154{
 155    static const uint8_t fprf[6][2] = {
 156        { 0x04, 0x08 },  /* normalized */
 157        { 0x02, 0x12 },  /* zero */
 158        { 0x14, 0x18 },  /* denormalized */
 159        { 0x05, 0x09 },  /* infinity */
 160        { 0x11, 0x11 },  /* qnan */
 161        { 0x00, 0x00 },  /* snan -- flags are undefined */
 162    };
 163    bool isneg = class & is_neg;
 164
 165    env->fpscr &= ~(0x1F << FPSCR_FPRF);
 166    env->fpscr |= fprf[ctz32(class)][isneg] << FPSCR_FPRF;
 167}
 168
/* Generate helper_compute_fprf_<tp>: classify 'arg' and store the
 * result into FPSCR[FPRF].  */
#define COMPUTE_FPRF(tp)                                \
void helper_compute_fprf_##tp(CPUPPCState *env, tp arg) \
{                                                       \
    set_fprf_from_class(env, tp##_classify(arg));       \
}

COMPUTE_FPRF(float16)
COMPUTE_FPRF(float32)
COMPUTE_FPRF(float64)
COMPUTE_FPRF(float128)
 179
 180/* Floating-point invalid operations exception */
 181static void finish_invalid_op_excp(CPUPPCState *env, int op, uintptr_t retaddr)
 182{
 183    /* Update the floating-point invalid operation summary */
 184    env->fpscr |= 1 << FPSCR_VX;
 185    /* Update the floating-point exception summary */
 186    env->fpscr |= FP_FX;
 187    if (fpscr_ve != 0) {
 188        /* Update the floating-point enabled exception summary */
 189        env->fpscr |= 1 << FPSCR_FEX;
 190        if (fp_exceptions_enabled(env)) {
 191            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
 192                                   POWERPC_EXCP_FP | op, retaddr);
 193        }
 194    }
 195}
 196
 197static void finish_invalid_op_arith(CPUPPCState *env, int op,
 198                                    bool set_fpcc, uintptr_t retaddr)
 199{
 200    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
 201    if (fpscr_ve == 0) {
 202        if (set_fpcc) {
 203            env->fpscr &= ~(0xF << FPSCR_FPCC);
 204            env->fpscr |= 0x11 << FPSCR_FPCC;
 205        }
 206    }
 207    finish_invalid_op_excp(env, op, retaddr);
 208}
 209
/* Signalling NaN: set FPSCR[VXSNAN] and finish the invalid op.  */
static void float_invalid_op_vxsnan(CPUPPCState *env, uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXSNAN;
    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, retaddr);
}
 216
/* Magnitude subtraction of infinities: set FPSCR[VXISI].  */
static void float_invalid_op_vxisi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXISI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXISI, set_fpcc, retaddr);
}
 224
/* Division of infinity by infinity: set FPSCR[VXIDI].  */
static void float_invalid_op_vxidi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXIDI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIDI, set_fpcc, retaddr);
}
 232
/* Division of zero by zero: set FPSCR[VXZDZ].  */
static void float_invalid_op_vxzdz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXZDZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXZDZ, set_fpcc, retaddr);
}
 240
/* Multiplication of zero by infinity: set FPSCR[VXIMZ].  */
static void float_invalid_op_vximz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXIMZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIMZ, set_fpcc, retaddr);
}
 248
/* Square root of a negative number: set FPSCR[VXSQRT].  */
static void float_invalid_op_vxsqrt(CPUPPCState *env, bool set_fpcc,
                                    uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXSQRT;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXSQRT, set_fpcc, retaddr);
}
 256
/* Ordered comparison of NaN.  Unlike the other invalid-op helpers this
 * never raises immediately: the program interrupt is only queued in
 * exception_index/error_code and delivered later (after the target FPR
 * has been updated) by float_check_status.  retaddr is accepted for
 * symmetry with the other helpers but unused here.  */
static void float_invalid_op_vxvc(CPUPPCState *env, bool set_fpcc,
                                  uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXVC;
    if (set_fpcc) {
        /* FPCC = "unordered" */
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    /* We must update the target FPR before raising the exception */
    if (fpscr_ve != 0) {
        CPUState *cs = CPU(ppc_env_get_cpu(env));

        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* Exception is deferred */
    }
}
 281
 282/* Invalid conversion */
 283static void float_invalid_op_vxcvi(CPUPPCState *env, bool set_fpcc,
 284                                   uintptr_t retaddr)
 285{
 286    env->fpscr |= 1 << FPSCR_VXCVI;
 287    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
 288    if (fpscr_ve == 0) {
 289        if (set_fpcc) {
 290            env->fpscr &= ~(0xF << FPSCR_FPCC);
 291            env->fpscr |= 0x11 << FPSCR_FPCC;
 292        }
 293    }
 294    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, retaddr);
 295}
 296
 297static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
 298{
 299    env->fpscr |= 1 << FPSCR_ZX;
 300    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
 301    /* Update the floating-point exception summary */
 302    env->fpscr |= FP_FX;
 303    if (fpscr_ze != 0) {
 304        /* Update the floating-point enabled exception summary */
 305        env->fpscr |= 1 << FPSCR_FEX;
 306        if (fp_exceptions_enabled(env)) {
 307            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
 308                                   POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
 309                                   raddr);
 310        }
 311    }
 312}
 313
 314static inline void float_overflow_excp(CPUPPCState *env)
 315{
 316    CPUState *cs = CPU(ppc_env_get_cpu(env));
 317
 318    env->fpscr |= 1 << FPSCR_OX;
 319    /* Update the floating-point exception summary */
 320    env->fpscr |= FP_FX;
 321    if (fpscr_oe != 0) {
 322        /* XXX: should adjust the result */
 323        /* Update the floating-point enabled exception summary */
 324        env->fpscr |= 1 << FPSCR_FEX;
 325        /* We must update the target FPR before raising the exception */
 326        cs->exception_index = POWERPC_EXCP_PROGRAM;
 327        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
 328    } else {
 329        env->fpscr |= 1 << FPSCR_XX;
 330        env->fpscr |= 1 << FPSCR_FI;
 331    }
 332}
 333
 334static inline void float_underflow_excp(CPUPPCState *env)
 335{
 336    CPUState *cs = CPU(ppc_env_get_cpu(env));
 337
 338    env->fpscr |= 1 << FPSCR_UX;
 339    /* Update the floating-point exception summary */
 340    env->fpscr |= FP_FX;
 341    if (fpscr_ue != 0) {
 342        /* XXX: should adjust the result */
 343        /* Update the floating-point enabled exception summary */
 344        env->fpscr |= 1 << FPSCR_FEX;
 345        /* We must update the target FPR before raising the exception */
 346        cs->exception_index = POWERPC_EXCP_PROGRAM;
 347        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
 348    }
 349}
 350
 351static inline void float_inexact_excp(CPUPPCState *env)
 352{
 353    CPUState *cs = CPU(ppc_env_get_cpu(env));
 354
 355    env->fpscr |= 1 << FPSCR_FI;
 356    env->fpscr |= 1 << FPSCR_XX;
 357    /* Update the floating-point exception summary */
 358    env->fpscr |= FP_FX;
 359    if (fpscr_xe != 0) {
 360        /* Update the floating-point enabled exception summary */
 361        env->fpscr |= 1 << FPSCR_FEX;
 362        /* We must update the target FPR before raising the exception */
 363        cs->exception_index = POWERPC_EXCP_PROGRAM;
 364        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
 365    }
 366}
 367
 368static inline void fpscr_set_rounding_mode(CPUPPCState *env)
 369{
 370    int rnd_type;
 371
 372    /* Set rounding mode */
 373    switch (fpscr_rn) {
 374    case 0:
 375        /* Best approximation (round to nearest) */
 376        rnd_type = float_round_nearest_even;
 377        break;
 378    case 1:
 379        /* Smaller magnitude (round toward zero) */
 380        rnd_type = float_round_to_zero;
 381        break;
 382    case 2:
 383        /* Round toward +infinite */
 384        rnd_type = float_round_up;
 385        break;
 386    default:
 387    case 3:
 388        /* Round toward -infinite */
 389        rnd_type = float_round_down;
 390        break;
 391    }
 392    set_float_rounding_mode(rnd_type, &env->fp_status);
 393}
 394
/* Clear a single FPSCR bit and propagate the side effects: rounding
 * mode changes reach softfloat, and the VX/FEX summary bits are
 * recomputed when their last contributing cause goes away.  */
void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            /* Rounding mode changed; update softfloat.  */
            fpscr_set_rounding_mode(env);
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            if (!fpscr_ix) {
                /* No invalid-operation cause remains: clear VX.  */
                env->fpscr &= ~(1 << FPSCR_VX);
            }
            break;
        case FPSCR_OX:
        case FPSCR_UX:
        case FPSCR_ZX:
        case FPSCR_XX:
        case FPSCR_VE:
        case FPSCR_OE:
        case FPSCR_UE:
        case FPSCR_ZE:
        case FPSCR_XE:
            if (!fpscr_eex) {
                /* No enabled exception remains: clear the FEX bit.  */
                env->fpscr &= ~(1 << FPSCR_FEX);
            }
            break;
        default:
            break;
        }
    }
}
 440
/* Set a single FPSCR bit and propagate the side effects.  Setting an
 * exception status bit while the matching enable is set — or setting an
 * enable while the matching status is set — queues a deferred program
 * interrupt via the shared raise_* labels below (Rc1 must be updated
 * before the exception is actually taken).  */
void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= FP_FX;
            if (fpscr_ve) {
                goto raise_ve;
            }
            break;
        case FPSCR_OX:
            env->fpscr |= FP_FX;
            if (fpscr_oe) {
                goto raise_oe;
            }
            break;
        case FPSCR_UX:
            env->fpscr |= FP_FX;
            if (fpscr_ue) {
                goto raise_ue;
            }
            break;
        case FPSCR_ZX:
            env->fpscr |= FP_FX;
            if (fpscr_ze) {
                goto raise_ze;
            }
            break;
        case FPSCR_XX:
            env->fpscr |= FP_FX;
            if (fpscr_xe) {
                goto raise_xe;
            }
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            /* Any invalid-operation cause also sets the VX summary.  */
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= FP_FX;
            if (fpscr_ve != 0) {
                goto raise_ve;
            }
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                /* Accumulate every pending invalid-op cause into the
                 * error code reported with the interrupt.  */
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan) {
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                }
                if (fpscr_vxisi) {
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                }
                if (fpscr_vxidi) {
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                }
                if (fpscr_vxzdz) {
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                }
                if (fpscr_vximz) {
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                }
                if (fpscr_vxvc) {
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                }
                if (fpscr_vxsoft) {
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                }
                if (fpscr_vxsqrt) {
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                }
                if (fpscr_vxcvi) {
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                }
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            /* Rounding mode changed; update softfloat.  */
            fpscr_set_rounding_mode(env);
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            cs->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
 572
/* Write 'arg' into the FPSCR under a per-nibble 'mask' (mtfsf
 * semantics), then recompute the derived VX and FEX summary bits and
 * refresh the softfloat rounding mode.  A pending program interrupt is
 * queued (deferred) when an enabled exception is now active.  */
void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    target_ulong prev, new;
    int i;

    prev = env->fpscr;
    new = (target_ulong)arg;
    /* FEX and VX (bits 0x60000000) are not directly writable; keep the
     * previous values here — they are recomputed below.  */
    new &= ~0x60000000LL;
    new |= prev & 0x60000000LL;
    /* One mask bit per 4-bit nibble of the FPSCR.  */
    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xFLL << (4 * i));
            env->fpscr |= new & (0xFLL << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0) {
        env->fpscr |= 1 << FPSCR_VX;
    } else {
        env->fpscr &= ~(1 << FPSCR_VX);
    }
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode(env);
}
 605
/* Non-helper entry point: same semantics as helper_store_fpscr().  */
void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    helper_store_fpscr(env, arg, mask);
}
 610
/* Translate accumulated softfloat exception flags into FPSCR updates
 * and deliver any deferred program interrupt that an earlier helper
 * queued in exception_index/error_code.  'raddr' is the helper return
 * address used to unwind for the exception.  */
static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    int status = get_float_exception_flags(&env->fp_status);
    bool inexact_happened = false;

    if (status & float_flag_overflow) {
        float_overflow_excp(env);
    } else if (status & float_flag_underflow) {
        float_underflow_excp(env);
    } else if (status & float_flag_inexact) {
        float_inexact_excp(env);
        inexact_happened = true;
    }

    /* if the inexact flag was not set */
    if (inexact_happened == false) {
        env->fpscr &= ~(1 << FPSCR_FI); /* clear the FPSCR[FI] bit */
    }

    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, cs->exception_index,
                                   env->error_code, raddr);
        }
    }
}
 640
/* Helper entry point: check FP status using the caller's PC.  */
void helper_float_check_status(CPUPPCState *env)
{
    do_float_check_status(env, GETPC());
}
 645
/* Clear the accumulated softfloat exception flags.  */
void helper_reset_fpstatus(CPUPPCState *env)
{
    set_float_exception_flags(0, &env->fp_status);
}
 650
 651static void float_invalid_op_addsub(CPUPPCState *env, bool set_fpcc,
 652                                    uintptr_t retaddr, int classes)
 653{
 654    if ((classes & ~is_neg) == is_inf) {
 655        /* Magnitude subtraction of infinities */
 656        float_invalid_op_vxisi(env, set_fpcc, retaddr);
 657    } else if (classes & is_snan) {
 658        float_invalid_op_vxsnan(env, retaddr);
 659    }
 660}
 661
 662/* fadd - fadd. */
 663float64 helper_fadd(CPUPPCState *env, float64 arg1, float64 arg2)
 664{
 665    float64 ret = float64_add(arg1, arg2, &env->fp_status);
 666    int status = get_float_exception_flags(&env->fp_status);
 667
 668    if (unlikely(status & float_flag_invalid)) {
 669        float_invalid_op_addsub(env, 1, GETPC(),
 670                                float64_classify(arg1) |
 671                                float64_classify(arg2));
 672    }
 673
 674    return ret;
 675}
 676
 677/* fsub - fsub. */
 678float64 helper_fsub(CPUPPCState *env, float64 arg1, float64 arg2)
 679{
 680    float64 ret = float64_sub(arg1, arg2, &env->fp_status);
 681    int status = get_float_exception_flags(&env->fp_status);
 682
 683    if (unlikely(status & float_flag_invalid)) {
 684        float_invalid_op_addsub(env, 1, GETPC(),
 685                                float64_classify(arg1) |
 686                                float64_classify(arg2));
 687    }
 688
 689    return ret;
 690}
 691
 692static void float_invalid_op_mul(CPUPPCState *env, bool set_fprc,
 693                                 uintptr_t retaddr, int classes)
 694{
 695    if ((classes & (is_zero | is_inf)) == (is_zero | is_inf)) {
 696        /* Multiplication of zero by infinity */
 697        float_invalid_op_vximz(env, set_fprc, retaddr);
 698    } else if (classes & is_snan) {
 699        float_invalid_op_vxsnan(env, retaddr);
 700    }
 701}
 702
 703/* fmul - fmul. */
 704float64 helper_fmul(CPUPPCState *env, float64 arg1, float64 arg2)
 705{
 706    float64 ret = float64_mul(arg1, arg2, &env->fp_status);
 707    int status = get_float_exception_flags(&env->fp_status);
 708
 709    if (unlikely(status & float_flag_invalid)) {
 710        float_invalid_op_mul(env, 1, GETPC(),
 711                             float64_classify(arg1) |
 712                             float64_classify(arg2));
 713    }
 714
 715    return ret;
 716}
 717
 718static void float_invalid_op_div(CPUPPCState *env, bool set_fprc,
 719                                 uintptr_t retaddr, int classes)
 720{
 721    classes &= ~is_neg;
 722    if (classes == is_inf) {
 723        /* Division of infinity by infinity */
 724        float_invalid_op_vxidi(env, set_fprc, retaddr);
 725    } else if (classes == is_zero) {
 726        /* Division of zero by zero */
 727        float_invalid_op_vxzdz(env, set_fprc, retaddr);
 728    } else if (classes & is_snan) {
 729        float_invalid_op_vxsnan(env, retaddr);
 730    }
 731}
 732
 733/* fdiv - fdiv. */
 734float64 helper_fdiv(CPUPPCState *env, float64 arg1, float64 arg2)
 735{
 736    float64 ret = float64_div(arg1, arg2, &env->fp_status);
 737    int status = get_float_exception_flags(&env->fp_status);
 738
 739    if (unlikely(status)) {
 740        if (status & float_flag_invalid) {
 741            float_invalid_op_div(env, 1, GETPC(),
 742                                 float64_classify(arg1) |
 743                                 float64_classify(arg2));
 744        }
 745        if (status & float_flag_divbyzero) {
 746            float_zero_divide_excp(env, GETPC());
 747        }
 748    }
 749
 750    return ret;
 751}
 752
/* Invalid float-to-integer conversion: always VXCVI, plus VXSNAN when
 * the operand was a signalling NaN.  */
static void float_invalid_cvt(CPUPPCState *env, bool set_fprc,
                              uintptr_t retaddr, int class1)
{
    float_invalid_op_vxcvi(env, set_fprc, retaddr);
    if (class1 & is_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}
 761
/*
 * Generate the float64 -> integer conversion helpers (fctiw and
 * friends).  'cvt' names the softfloat conversion routine and 'nanval'
 * is the architected result substituted when the conversion is
 * invalid.
 */
#define FPU_FCTI(op, cvt, nanval)                                      \
uint64_t helper_##op(CPUPPCState *env, float64 arg)                    \
{                                                                      \
    uint64_t ret = float64_to_##cvt(arg, &env->fp_status);             \
    int status = get_float_exception_flags(&env->fp_status);           \
                                                                       \
    if (unlikely(status)) {                                            \
        if (status & float_flag_invalid) {                             \
            float_invalid_cvt(env, 1, GETPC(), float64_classify(arg)); \
            ret = nanval;                                              \
        }                                                              \
        do_float_check_status(env, GETPC());                           \
    }                                                                  \
    return ret;                                                        \
}

FPU_FCTI(fctiw, int32, 0x80000000U)
FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
FPU_FCTI(fctiwu, uint32, 0x00000000U)
FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
 786
/*
 * Generate the integer -> float64 conversion helpers (fcfid and
 * friends).  When 'is_single' the value is converted to float32 first
 * and then widened, so the result is correctly rounded to single
 * precision.
 */
#define FPU_FCFI(op, cvtr, is_single)                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)       \
{                                                          \
    CPU_DoubleU farg;                                      \
                                                           \
    if (is_single) {                                       \
        float32 tmp = cvtr(arg, &env->fp_status);          \
        farg.d = float32_to_float64(tmp, &env->fp_status); \
    } else {                                               \
        farg.d = cvtr(arg, &env->fp_status);               \
    }                                                      \
    do_float_check_status(env, GETPC());                   \
    return farg.ll;                                        \
}

FPU_FCFI(fcfid, int64_to_float64, 0)
FPU_FCFI(fcfids, int64_to_float32, 1)
FPU_FCFI(fcfidu, uint64_to_float64, 0)
FPU_FCFI(fcfidus, uint64_to_float32, 1)
 806
 807static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
 808                              int rounding_mode)
 809{
 810    CPU_DoubleU farg;
 811
 812    farg.ll = arg;
 813
 814    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
 815        /* sNaN round */
 816        float_invalid_op_vxsnan(env, GETPC());
 817        farg.ll = arg | 0x0008000000000000ULL;
 818    } else {
 819        int inexact = get_float_exception_flags(&env->fp_status) &
 820                      float_flag_inexact;
 821        set_float_rounding_mode(rounding_mode, &env->fp_status);
 822        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
 823        /* Restore rounding mode from FPSCR */
 824        fpscr_set_rounding_mode(env);
 825
 826        /* fri* does not set FPSCR[XX] */
 827        if (!inexact) {
 828            env->fp_status.float_exception_flags &= ~float_flag_inexact;
 829        }
 830    }
 831    do_float_check_status(env, GETPC());
 832    return farg.ll;
 833}
 834
/* frin: round to nearest, ties away from zero.  */
uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_ties_away);
}
 839
/* friz: round toward zero.  */
uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_to_zero);
}
 844
/* frip: round toward +infinity.  */
uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_up);
}
 849
/* frim: round toward -infinity.  */
uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_down);
}
 854
/*
 * Generate the invalid-operation reporting used by the fused
 * multiply-add helpers: VXSNAN for any sNaN operand, VXIMZ for
 * 0 * inf in the product, and VXISI when the (possibly negated)
 * addend infinity opposes an infinite product.
 *
 * NOTE(review): only float_muladd_negate_c is folded into the sign
 * check for VXISI; confirm the other madd_flags (e.g. negate_result)
 * cannot change which sign combination is invalid here.
 */
#define FPU_MADDSUB_UPDATE(NAME, TP)                                    \
static void NAME(CPUPPCState *env, TP arg1, TP arg2, TP arg3,           \
                 unsigned int madd_flags, uintptr_t retaddr)            \
{                                                                       \
    if (TP##_is_signaling_nan(arg1, &env->fp_status) ||                 \
        TP##_is_signaling_nan(arg2, &env->fp_status) ||                 \
        TP##_is_signaling_nan(arg3, &env->fp_status)) {                 \
        /* sNaN operation */                                            \
        float_invalid_op_vxsnan(env, retaddr);                          \
    }                                                                   \
    if ((TP##_is_infinity(arg1) && TP##_is_zero(arg2)) ||               \
        (TP##_is_zero(arg1) && TP##_is_infinity(arg2))) {               \
        /* Multiplication of zero by infinity */                        \
        float_invalid_op_vximz(env, 1, retaddr);                        \
    }                                                                   \
    if ((TP##_is_infinity(arg1) || TP##_is_infinity(arg2)) &&           \
        TP##_is_infinity(arg3)) {                                       \
        uint8_t aSign, bSign, cSign;                                    \
                                                                        \
        aSign = TP##_is_neg(arg1);                                      \
        bSign = TP##_is_neg(arg2);                                      \
        cSign = TP##_is_neg(arg3);                                      \
        if (madd_flags & float_muladd_negate_c) {                       \
            cSign ^= 1;                                                 \
        }                                                               \
        if (aSign ^ bSign ^ cSign) {                                    \
            float_invalid_op_vxisi(env, 1, retaddr);                    \
        }                                                               \
    }                                                                   \
}
FPU_MADDSUB_UPDATE(float32_maddsub_update_excp, float32)
FPU_MADDSUB_UPDATE(float64_maddsub_update_excp, float64)
 887
/*
 * FPU_FMADD - generate a fused multiply-add helper performing
 * (arg1 * arg2) + arg3 with the negations selected by madd_flags.
 * Invalid-operation causes are classified by the
 * float64_maddsub_update_excp function generated above.
 */
#define FPU_FMADD(op, madd_flags)                                       \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg1,                   \
                     uint64_t arg2, uint64_t arg3)                      \
{                                                                       \
    uint32_t flags;                                                     \
    float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags,          \
                                 &env->fp_status);                      \
    flags = get_float_exception_flags(&env->fp_status);                 \
    if (flags) {                                                        \
        if (flags & float_flag_invalid) {                               \
            float64_maddsub_update_excp(env, arg1, arg2, arg3,          \
                                        madd_flags, GETPC());           \
        }                                                               \
        do_float_check_status(env, GETPC());                            \
    }                                                                   \
    return ret;                                                         \
}

/* Negation flag combinations for the four fma variants. */
#define MADD_FLGS 0
#define MSUB_FLGS float_muladd_negate_c
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)

FPU_FMADD(fmadd, MADD_FLGS)
FPU_FMADD(fnmadd, NMADD_FLGS)
FPU_FMADD(fmsub, MSUB_FLGS)
FPU_FMADD(fnmsub, NMSUB_FLGS)
 915
 916/* frsp - frsp. */
 917uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
 918{
 919    CPU_DoubleU farg;
 920    float32 f32;
 921
 922    farg.ll = arg;
 923
 924    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
 925        float_invalid_op_vxsnan(env, GETPC());
 926    }
 927    f32 = float64_to_float32(farg.d, &env->fp_status);
 928    farg.d = float32_to_float64(f32, &env->fp_status);
 929
 930    return farg.ll;
 931}
 932
 933/* fsqrt - fsqrt. */
 934float64 helper_fsqrt(CPUPPCState *env, float64 arg)
 935{
 936    float64 ret = float64_sqrt(arg, &env->fp_status);
 937    int status = get_float_exception_flags(&env->fp_status);
 938
 939    if (unlikely(status & float_flag_invalid)) {
 940        if (unlikely(float64_is_any_nan(arg))) {
 941            if (unlikely(float64_is_signaling_nan(arg, &env->fp_status))) {
 942                /* sNaN square root */
 943                float_invalid_op_vxsnan(env, GETPC());
 944            }
 945        } else {
 946            /* Square root of a negative nonzero number */
 947            float_invalid_op_vxsqrt(env, 1, GETPC());
 948        }
 949    }
 950
 951    return ret;
 952}
 953
 954/* fre - fre. */
 955float64 helper_fre(CPUPPCState *env, float64 arg)
 956{
 957    /* "Estimate" the reciprocal with actual division.  */
 958    float64 ret = float64_div(float64_one, arg, &env->fp_status);
 959    int status = get_float_exception_flags(&env->fp_status);
 960
 961    if (unlikely(status)) {
 962        if (status & float_flag_invalid) {
 963            if (float64_is_signaling_nan(arg, &env->fp_status)) {
 964                /* sNaN reciprocal */
 965                float_invalid_op_vxsnan(env, GETPC());
 966            }
 967        }
 968        if (status & float_flag_divbyzero) {
 969            float_zero_divide_excp(env, GETPC());
 970            /* For FPSCR.ZE == 0, the result is 1/2.  */
 971            ret = float64_set_sign(float64_half, float64_is_neg(arg));
 972        }
 973    }
 974
 975    return ret;
 976}
 977
 978/* fres - fres. */
 979uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
 980{
 981    CPU_DoubleU farg;
 982    float32 f32;
 983
 984    farg.ll = arg;
 985
 986    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
 987        /* sNaN reciprocal */
 988        float_invalid_op_vxsnan(env, GETPC());
 989    }
 990    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
 991    f32 = float64_to_float32(farg.d, &env->fp_status);
 992    farg.d = float32_to_float64(f32, &env->fp_status);
 993
 994    return farg.ll;
 995}
 996
 997/* frsqrte  - frsqrte. */
 998float64 helper_frsqrte(CPUPPCState *env, float64 arg)
 999{
1000    /* "Estimate" the reciprocal with actual division.  */
1001    float64 rets = float64_sqrt(arg, &env->fp_status);
1002    float64 retd = float64_div(float64_one, rets, &env->fp_status);
1003    int status = get_float_exception_flags(&env->fp_status);
1004
1005    if (unlikely(status)) {
1006        if (status & float_flag_invalid) {
1007            if (float64_is_signaling_nan(arg, &env->fp_status)) {
1008                /* sNaN reciprocal */
1009                float_invalid_op_vxsnan(env, GETPC());
1010            } else {
1011                /* Square root of a negative nonzero number */
1012                float_invalid_op_vxsqrt(env, 1, GETPC());
1013            }
1014        }
1015        if (status & float_flag_divbyzero) {
1016            /* Reciprocal of (square root of) zero.  */
1017            float_zero_divide_excp(env, GETPC());
1018        }
1019    }
1020
1021    return retd;
1022}
1023
1024/* fsel - fsel. */
1025uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
1026                     uint64_t arg3)
1027{
1028    CPU_DoubleU farg1;
1029
1030    farg1.ll = arg1;
1031
1032    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
1033        !float64_is_any_nan(farg1.d)) {
1034        return arg2;
1035    } else {
1036        return arg3;
1037    }
1038}
1039
/*
 * ftdiv: test whether fra / frb is safe for the software
 * divide-estimate sequence.  Returns a CR field image:
 *   0x8      - always set
 *   0x4 (fg) - set when an operand is infinite, zero or denormal
 *   0x2 (fe) - set when the exponents fall outside the safe range
 */
uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(fra) ||
                 float64_is_infinity(frb) ||
                 float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_a = ppc_float64_get_unbiased_exp(fra);
        int e_b = ppc_float64_get_unbiased_exp(frb);

        /* NaNs and divisor exponents near the format limits make the
           estimate unreliable. */
        if (unlikely(float64_is_any_nan(fra) ||
                     float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if ((e_b <= -1022) || (e_b >= 1021)) {
            fe_flag = 1;
        } else if (!float64_is_zero(fra) &&
                   (((e_a - e_b) >= 1023) ||
                    ((e_a - e_b) <= -1021) ||
                    (e_a <= -970))) {
            /* Quotient exponent would overflow or underflow. */
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* so must be denormalized.                      */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
1075
1076uint32_t helper_ftsqrt(uint64_t frb)
1077{
1078    int fe_flag = 0;
1079    int fg_flag = 0;
1080
1081    if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
1082        fe_flag = 1;
1083        fg_flag = 1;
1084    } else {
1085        int e_b = ppc_float64_get_unbiased_exp(frb);
1086
1087        if (unlikely(float64_is_any_nan(frb))) {
1088            fe_flag = 1;
1089        } else if (unlikely(float64_is_zero(frb))) {
1090            fe_flag = 1;
1091        } else if (unlikely(float64_is_neg(frb))) {
1092            fe_flag = 1;
1093        } else if (!float64_is_zero(frb) && (e_b <= (-1022+52))) {
1094            fe_flag = 1;
1095        }
1096
1097        if (unlikely(float64_is_zero_or_denormal(frb))) {
1098            /* XB is not zero because of the above check and */
1099            /* therefore must be denormalized.               */
1100            fg_flag = 1;
1101        }
1102    }
1103
1104    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
1105}
1106
1107void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
1108                  uint32_t crfD)
1109{
1110    CPU_DoubleU farg1, farg2;
1111    uint32_t ret = 0;
1112
1113    farg1.ll = arg1;
1114    farg2.ll = arg2;
1115
1116    if (unlikely(float64_is_any_nan(farg1.d) ||
1117                 float64_is_any_nan(farg2.d))) {
1118        ret = 0x01UL;
1119    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1120        ret = 0x08UL;
1121    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1122        ret = 0x04UL;
1123    } else {
1124        ret = 0x02UL;
1125    }
1126
1127    env->fpscr &= ~(0x0F << FPSCR_FPRF);
1128    env->fpscr |= ret << FPSCR_FPRF;
1129    env->crf[crfD] = ret;
1130    if (unlikely(ret == 0x01UL
1131                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
1132                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
1133        /* sNaN comparison */
1134        float_invalid_op_vxsnan(env, GETPC());
1135    }
1136}
1137
1138void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
1139                  uint32_t crfD)
1140{
1141    CPU_DoubleU farg1, farg2;
1142    uint32_t ret = 0;
1143
1144    farg1.ll = arg1;
1145    farg2.ll = arg2;
1146
1147    if (unlikely(float64_is_any_nan(farg1.d) ||
1148                 float64_is_any_nan(farg2.d))) {
1149        ret = 0x01UL;
1150    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1151        ret = 0x08UL;
1152    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1153        ret = 0x04UL;
1154    } else {
1155        ret = 0x02UL;
1156    }
1157
1158    env->fpscr &= ~(0x0F << FPSCR_FPRF);
1159    env->fpscr |= ret << FPSCR_FPRF;
1160    env->crf[crfD] = ret;
1161    if (unlikely(ret == 0x01UL)) {
1162        float_invalid_op_vxvc(env, 1, GETPC());
1163        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
1164            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
1165            /* sNaN comparison */
1166            float_invalid_op_vxsnan(env, GETPC());
1167        }
1168    }
1169}
1170
1171/* Single-precision floating-point conversions */
1172static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
1173{
1174    CPU_FloatU u;
1175
1176    u.f = int32_to_float32(val, &env->vec_status);
1177
1178    return u.l;
1179}
1180
1181static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
1182{
1183    CPU_FloatU u;
1184
1185    u.f = uint32_to_float32(val, &env->vec_status);
1186
1187    return u.l;
1188}
1189
1190static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
1191{
1192    CPU_FloatU u;
1193
1194    u.l = val;
1195    /* NaN are not treated the same way IEEE 754 does */
1196    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1197        return 0;
1198    }
1199
1200    return float32_to_int32(u.f, &env->vec_status);
1201}
1202
1203static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
1204{
1205    CPU_FloatU u;
1206
1207    u.l = val;
1208    /* NaN are not treated the same way IEEE 754 does */
1209    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1210        return 0;
1211    }
1212
1213    return float32_to_uint32(u.f, &env->vec_status);
1214}
1215
1216static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
1217{
1218    CPU_FloatU u;
1219
1220    u.l = val;
1221    /* NaN are not treated the same way IEEE 754 does */
1222    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1223        return 0;
1224    }
1225
1226    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
1227}
1228
1229static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
1230{
1231    CPU_FloatU u;
1232
1233    u.l = val;
1234    /* NaN are not treated the same way IEEE 754 does */
1235    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1236        return 0;
1237    }
1238
1239    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
1240}
1241
1242static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
1243{
1244    CPU_FloatU u;
1245    float32 tmp;
1246
1247    u.f = int32_to_float32(val, &env->vec_status);
1248    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
1249    u.f = float32_div(u.f, tmp, &env->vec_status);
1250
1251    return u.l;
1252}
1253
1254static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
1255{
1256    CPU_FloatU u;
1257    float32 tmp;
1258
1259    u.f = uint32_to_float32(val, &env->vec_status);
1260    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
1261    u.f = float32_div(u.f, tmp, &env->vec_status);
1262
1263    return u.l;
1264}
1265
1266static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
1267{
1268    CPU_FloatU u;
1269    float32 tmp;
1270
1271    u.l = val;
1272    /* NaN are not treated the same way IEEE 754 does */
1273    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1274        return 0;
1275    }
1276    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
1277    u.f = float32_mul(u.f, tmp, &env->vec_status);
1278
1279    return float32_to_int32(u.f, &env->vec_status);
1280}
1281
1282static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
1283{
1284    CPU_FloatU u;
1285    float32 tmp;
1286
1287    u.l = val;
1288    /* NaN are not treated the same way IEEE 754 does */
1289    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1290        return 0;
1291    }
1292    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
1293    u.f = float32_mul(u.f, tmp, &env->vec_status);
1294
1295    return float32_to_uint32(u.f, &env->vec_status);
1296}
1297
/*
 * HELPER_SPE_SINGLE_CONV - expose one of the scalar efs* conversion
 * primitives above as a 32-bit SPE helper entry point.
 */
#define HELPER_SPE_SINGLE_CONV(name)                              \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t val)       \
    {                                                             \
        return e##name(env, val);                                 \
    }
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);
1323
/*
 * HELPER_SPE_VECTOR_CONV - apply a scalar efs* conversion to both
 * 32-bit halves of a 64-bit SPE vector.
 */
#define HELPER_SPE_VECTOR_CONV(name)                            \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val)    \
    {                                                           \
        return ((uint64_t)e##name(env, val >> 32) << 32) |      \
            (uint64_t)e##name(env, val);                        \
    }
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);
1350
1351/* Single-precision floating-point arithmetic */
1352static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
1353{
1354    CPU_FloatU u1, u2;
1355
1356    u1.l = op1;
1357    u2.l = op2;
1358    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
1359    return u1.l;
1360}
1361
1362static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
1363{
1364    CPU_FloatU u1, u2;
1365
1366    u1.l = op1;
1367    u2.l = op2;
1368    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
1369    return u1.l;
1370}
1371
1372static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
1373{
1374    CPU_FloatU u1, u2;
1375
1376    u1.l = op1;
1377    u2.l = op2;
1378    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
1379    return u1.l;
1380}
1381
1382static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
1383{
1384    CPU_FloatU u1, u2;
1385
1386    u1.l = op1;
1387    u2.l = op2;
1388    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
1389    return u1.l;
1390}
1391
/*
 * HELPER_SPE_SINGLE_ARITH - expose one of the scalar efs* arithmetic
 * primitives above as a 32-bit SPE helper entry point.
 */
#define HELPER_SPE_SINGLE_ARITH(name)                                   \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                   \
        return e##name(env, op1, op2);                                  \
    }
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);
1405
/*
 * HELPER_SPE_VECTOR_ARITH - apply a scalar efs* arithmetic primitive
 * to both 32-bit halves of a 64-bit SPE vector.
 */
#define HELPER_SPE_VECTOR_ARITH(name)                                   \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                   \
        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |   \
            (uint64_t)e##name(env, op1, op2);                           \
    }
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);
1420
1421/* Single-precision floating-point comparisons */
1422static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1423{
1424    CPU_FloatU u1, u2;
1425
1426    u1.l = op1;
1427    u2.l = op2;
1428    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
1429}
1430
1431static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1432{
1433    CPU_FloatU u1, u2;
1434
1435    u1.l = op1;
1436    u2.l = op2;
1437    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
1438}
1439
1440static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
1441{
1442    CPU_FloatU u1, u2;
1443
1444    u1.l = op1;
1445    u2.l = op2;
1446    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
1447}
1448
/* efststlt: test-form less-than; currently aliases the compare form. */
static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmplt(env, op1, op2);
}
1454
/* efststgt: test-form greater-than; currently aliases the compare form. */
static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmpgt(env, op1, op2);
}
1460
/* efststeq: test-form equality; currently aliases the compare form. */
static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmpeq(env, op1, op2);
}
1466
/*
 * HELPER_SINGLE_SPE_CMP - expose one of the scalar efs* comparison
 * primitives above as a 32-bit SPE helper entry point.
 */
#define HELPER_SINGLE_SPE_CMP(name)                                     \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                   \
        return e##name(env, op1, op2);                                  \
    }
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);
1484
/*
 * Merge the per-element results of a vector comparison (t0 = high
 * element, t1 = low element, each 0 or 1) into a 4-bit CR field:
 * { t0, t1, t0|t1, t0&t1 }.
 */
static inline uint32_t evcmp_merge(int t0, int t1)
{
    uint32_t any = t0 | t1;
    uint32_t all = t0 & t1;

    return (t0 << 3) | (t1 << 2) | (any << 1) | all;
}
1489
/*
 * HELPER_VECTOR_SPE_CMP - compare both 32-bit halves of two SPE
 * vectors and merge the two results into one CR field.
 */
#define HELPER_VECTOR_SPE_CMP(name)                                     \
    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                   \
        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),          \
                           e##name(env, op1, op2));                     \
    }
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
1508
1509/* Double-precision floating-point conversion */
1510uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
1511{
1512    CPU_DoubleU u;
1513
1514    u.d = int32_to_float64(val, &env->vec_status);
1515
1516    return u.ll;
1517}
1518
1519uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
1520{
1521    CPU_DoubleU u;
1522
1523    u.d = int64_to_float64(val, &env->vec_status);
1524
1525    return u.ll;
1526}
1527
1528uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
1529{
1530    CPU_DoubleU u;
1531
1532    u.d = uint32_to_float64(val, &env->vec_status);
1533
1534    return u.ll;
1535}
1536
1537uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
1538{
1539    CPU_DoubleU u;
1540
1541    u.d = uint64_to_float64(val, &env->vec_status);
1542
1543    return u.ll;
1544}
1545
1546uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
1547{
1548    CPU_DoubleU u;
1549
1550    u.ll = val;
1551    /* NaN are not treated the same way IEEE 754 does */
1552    if (unlikely(float64_is_any_nan(u.d))) {
1553        return 0;
1554    }
1555
1556    return float64_to_int32(u.d, &env->vec_status);
1557}
1558
1559uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
1560{
1561    CPU_DoubleU u;
1562
1563    u.ll = val;
1564    /* NaN are not treated the same way IEEE 754 does */
1565    if (unlikely(float64_is_any_nan(u.d))) {
1566        return 0;
1567    }
1568
1569    return float64_to_uint32(u.d, &env->vec_status);
1570}
1571
1572uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
1573{
1574    CPU_DoubleU u;
1575
1576    u.ll = val;
1577    /* NaN are not treated the same way IEEE 754 does */
1578    if (unlikely(float64_is_any_nan(u.d))) {
1579        return 0;
1580    }
1581
1582    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
1583}
1584
1585uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
1586{
1587    CPU_DoubleU u;
1588
1589    u.ll = val;
1590    /* NaN are not treated the same way IEEE 754 does */
1591    if (unlikely(float64_is_any_nan(u.d))) {
1592        return 0;
1593    }
1594
1595    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
1596}
1597
1598uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
1599{
1600    CPU_DoubleU u;
1601
1602    u.ll = val;
1603    /* NaN are not treated the same way IEEE 754 does */
1604    if (unlikely(float64_is_any_nan(u.d))) {
1605        return 0;
1606    }
1607
1608    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
1609}
1610
1611uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
1612{
1613    CPU_DoubleU u;
1614
1615    u.ll = val;
1616    /* NaN are not treated the same way IEEE 754 does */
1617    if (unlikely(float64_is_any_nan(u.d))) {
1618        return 0;
1619    }
1620
1621    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
1622}
1623
1624uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
1625{
1626    CPU_DoubleU u;
1627    float64 tmp;
1628
1629    u.d = int32_to_float64(val, &env->vec_status);
1630    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
1631    u.d = float64_div(u.d, tmp, &env->vec_status);
1632
1633    return u.ll;
1634}
1635
1636uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
1637{
1638    CPU_DoubleU u;
1639    float64 tmp;
1640
1641    u.d = uint32_to_float64(val, &env->vec_status);
1642    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
1643    u.d = float64_div(u.d, tmp, &env->vec_status);
1644
1645    return u.ll;
1646}
1647
1648uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
1649{
1650    CPU_DoubleU u;
1651    float64 tmp;
1652
1653    u.ll = val;
1654    /* NaN are not treated the same way IEEE 754 does */
1655    if (unlikely(float64_is_any_nan(u.d))) {
1656        return 0;
1657    }
1658    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
1659    u.d = float64_mul(u.d, tmp, &env->vec_status);
1660
1661    return float64_to_int32(u.d, &env->vec_status);
1662}
1663
1664uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
1665{
1666    CPU_DoubleU u;
1667    float64 tmp;
1668
1669    u.ll = val;
1670    /* NaN are not treated the same way IEEE 754 does */
1671    if (unlikely(float64_is_any_nan(u.d))) {
1672        return 0;
1673    }
1674    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
1675    u.d = float64_mul(u.d, tmp, &env->vec_status);
1676
1677    return float64_to_uint32(u.d, &env->vec_status);
1678}
1679
1680uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
1681{
1682    CPU_DoubleU u1;
1683    CPU_FloatU u2;
1684
1685    u1.ll = val;
1686    u2.f = float64_to_float32(u1.d, &env->vec_status);
1687
1688    return u2.l;
1689}
1690
1691uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
1692{
1693    CPU_DoubleU u2;
1694    CPU_FloatU u1;
1695
1696    u1.l = val;
1697    u2.d = float32_to_float64(u1.f, &env->vec_status);
1698
1699    return u2.ll;
1700}
1701
1702/* Double precision fixed-point arithmetic */
1703uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
1704{
1705    CPU_DoubleU u1, u2;
1706
1707    u1.ll = op1;
1708    u2.ll = op2;
1709    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
1710    return u1.ll;
1711}
1712
1713uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
1714{
1715    CPU_DoubleU u1, u2;
1716
1717    u1.ll = op1;
1718    u2.ll = op2;
1719    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
1720    return u1.ll;
1721}
1722
1723uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
1724{
1725    CPU_DoubleU u1, u2;
1726
1727    u1.ll = op1;
1728    u2.ll = op2;
1729    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
1730    return u1.ll;
1731}
1732
1733uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
1734{
1735    CPU_DoubleU u1, u2;
1736
1737    u1.ll = op1;
1738    u2.ll = op2;
1739    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
1740    return u1.ll;
1741}
1742
1743/* Double precision floating point helpers */
1744uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1745{
1746    CPU_DoubleU u1, u2;
1747
1748    u1.ll = op1;
1749    u2.ll = op2;
1750    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
1751}
1752
1753uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1754{
1755    CPU_DoubleU u1, u2;
1756
1757    u1.ll = op1;
1758    u2.ll = op2;
1759    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
1760}
1761
1762uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
1763{
1764    CPU_DoubleU u1, u2;
1765
1766    u1.ll = op1;
1767    u2.ll = op2;
1768    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
1769}
1770
/* efdcmplt: compare-form less-than; currently aliases the test form. */
uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstlt(env, op1, op2);
}
1776
/* efdcmpgt: compare-form greater-than; currently aliases the test form. */
uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstgt(env, op1, op2);
}
1782
/* efdcmpeq: compare-form equality; currently aliases the test form. */
uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtsteq(env, op1, op2);
}
1788
1789#define float64_to_float64(x, env) x
1790
1791
1792/* VSX_ADD_SUB - VSX floating point add/subract
1793 *   name  - instruction mnemonic
1794 *   op    - operation (add or sub)
1795 *   nels  - number of elements (1, 2 or 4)
1796 *   tp    - type (float32 or float64)
1797 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1798 *   sfprf - set FPRF
1799 */
1800#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp)                    \
1801void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
1802{                                                                            \
1803    ppc_vsr_t xt, xa, xb;                                                    \
1804    int i;                                                                   \
1805                                                                             \
1806    getVSR(xA(opcode), &xa, env);                                            \
1807    getVSR(xB(opcode), &xb, env);                                            \
1808    getVSR(xT(opcode), &xt, env);                                            \
1809    helper_reset_fpstatus(env);                                              \
1810                                                                             \
1811    for (i = 0; i < nels; i++) {                                             \
1812        float_status tstat = env->fp_status;                                 \
1813        set_float_exception_flags(0, &tstat);                                \
1814        xt.fld = tp##_##op(xa.fld, xb.fld, &tstat);                          \
1815        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
1816                                                                             \
1817        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
1818            float_invalid_op_addsub(env, sfprf, GETPC(),                     \
1819                                    tp##_classify(xa.fld) |                  \
1820                                    tp##_classify(xb.fld));                  \
1821        }                                                                    \
1822                                                                             \
1823        if (r2sp) {                                                          \
1824            xt.fld = helper_frsp(env, xt.fld);                               \
1825        }                                                                    \
1826                                                                             \
1827        if (sfprf) {                                                         \
1828            helper_compute_fprf_float64(env, xt.fld);                        \
1829        }                                                                    \
1830    }                                                                        \
1831    putVSR(xT(opcode), &xt, env);                                            \
1832    do_float_check_status(env, GETPC());                                     \
1833}
1834
/* Scalar forms (xs*) set FPRF; the *sp variants additionally round the
 * result to single precision.  Vector forms (xv*) do neither. */
VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
1843
/*
 * VSX scalar add quad-precision.  When Rc(opcode) is set the operation
 * rounds to odd (the 'o' instruction form).
 */
void helper_xsaddqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);
    helper_reset_fpstatus(env);

    /* Work on a local copy of fp_status so the per-instruction
     * rounding-mode override does not leak into env->fp_status. */
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_add(xa.f128, xb.f128, &tstat);
    /* Merge exceptions raised by this operation into the live status. */
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, 1, GETPC(),
                                float128_classify(xa.f128) |
                                float128_classify(xb.f128));
    }

    helper_compute_fprf_float128(env, xt.f128);

    putVSR(rD(opcode) + 32, &xt, env);
    do_float_check_status(env, GETPC());
}
1874
/* VSX_MUL - VSX floating point multiply
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round the result to single precision
 */
#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp)                              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        /* Multiply with a scratch status so the element's flags can   */    \
        /* be inspected before being merged into env->fp_status.       */    \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_mul(xa.fld, xb.fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_mul(env, sfprf, GETPC(),                        \
                                 tp##_classify(xa.fld) |                     \
                                 tp##_classify(xb.fld));                     \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    do_float_check_status(env, GETPC());                                     \
}

VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
1922
/*
 * VSX scalar multiply quad-precision.  When Rc(opcode) is set the
 * operation rounds to odd (the 'o' instruction form).
 */
void helper_xsmulqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);

    helper_reset_fpstatus(env);
    /* Local copy so the round-to-odd override does not persist. */
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_mul(xa.f128, xb.f128, &tstat);
    /* Merge exceptions raised by this operation into the live status. */
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_mul(env, 1, GETPC(),
                             float128_classify(xa.f128) |
                             float128_classify(xb.f128));
    }
    helper_compute_fprf_float128(env, xt.f128);

    putVSR(rD(opcode) + 32, &xt, env);
    do_float_check_status(env, GETPC());
}
1952
/* VSX_DIV - VSX floating point divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round the result to single precision
 */
#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp)                               \
void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    int i;                                                                    \
                                                                              \
    getVSR(xA(opcode), &xa, env);                                             \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        /* Divide with a scratch status so the element's flags can be  */     \
        /* inspected before being merged into env->fp_status.          */     \
        float_status tstat = env->fp_status;                                  \
        set_float_exception_flags(0, &tstat);                                 \
        xt.fld = tp##_div(xa.fld, xb.fld, &tstat);                            \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
                                                                              \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
            float_invalid_op_div(env, sfprf, GETPC(),                         \
                                 tp##_classify(xa.fld) |                      \
                                 tp##_classify(xb.fld));                      \
        }                                                                     \
        if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {   \
            float_zero_divide_excp(env, GETPC());                             \
        }                                                                     \
                                                                              \
        if (r2sp) {                                                           \
            xt.fld = helper_frsp(env, xt.fld);                                \
        }                                                                     \
                                                                              \
        if (sfprf) {                                                          \
            helper_compute_fprf_float64(env, xt.fld);                         \
        }                                                                     \
    }                                                                         \
                                                                              \
    putVSR(xT(opcode), &xt, env);                                             \
    do_float_check_status(env, GETPC());                                      \
}

VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
2003
/*
 * VSX scalar divide quad-precision.  When Rc(opcode) is set the
 * operation rounds to odd (the 'o' instruction form).
 */
void helper_xsdivqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);

    helper_reset_fpstatus(env);
    /* Local copy so the round-to-odd override does not persist. */
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_div(xa.f128, xb.f128, &tstat);
    /* Merge exceptions raised by this operation into the live status. */
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_div(env, 1, GETPC(),
                             float128_classify(xa.f128) |
                             float128_classify(xb.f128));
    }
    if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    do_float_check_status(env, GETPC());
}
2036
/* VSX_RE  - VSX floating point reciprocal estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round the result to single precision
 */
#define VSX_RE(op, nels, tp, fld, sfprf, r2sp)                                \
void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
{                                                                             \
    ppc_vsr_t xt, xb;                                                         \
    int i;                                                                    \
                                                                              \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        if (unlikely(tp##_is_signaling_nan(xb.fld, &env->fp_status))) {       \
            float_invalid_op_vxsnan(env, GETPC());                            \
        }                                                                     \
        /* The "estimate" is implemented as an exact 1/x division,    */      \
        /* operating directly on the live env->fp_status.             */      \
        xt.fld = tp##_div(tp##_one, xb.fld, &env->fp_status);                 \
                                                                              \
        if (r2sp) {                                                           \
            xt.fld = helper_frsp(env, xt.fld);                                \
        }                                                                     \
                                                                              \
        if (sfprf) {                                                          \
            helper_compute_fprf_float64(env, xt.fld);                         \
        }                                                                     \
    }                                                                         \
                                                                              \
    putVSR(xT(opcode), &xt, env);                                             \
    do_float_check_status(env, GETPC());                                      \
}

VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
2077
/* VSX_SQRT - VSX floating point square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round the result to single precision
 */
#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp)                             \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        /* Take the root with a scratch status so the element's flags */     \
        /* can be inspected before merging into env->fp_status.       */     \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            /* Negative non-zero input -> VXSQRT; SNaN input -> VXSNAN. */   \
            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
                float_invalid_op_vxsqrt(env, sfprf, GETPC());                \
            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_vxsnan(env, GETPC());                       \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    do_float_check_status(env, GETPC());                                     \
}

VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
2126
/* VSX_RSQRTE - VSX floating point reciprocal square root estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round the result to single precision
 */
#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp)                           \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        /* The "estimate" is computed exactly as 1/sqrt(x), with a     */    \
        /* scratch status merged into env->fp_status afterwards.       */    \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
        xt.fld = tp##_div(tp##_one, xt.fld, &tstat);                         \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            /* Negative non-zero input -> VXSQRT; SNaN input -> VXSNAN. */   \
            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
                float_invalid_op_vxsqrt(env, sfprf, GETPC());                \
            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_vxsnan(env, GETPC());                       \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    do_float_check_status(env, GETPC());                                     \
}

VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
2176
2177/* VSX_TDIV - VSX floating point test for divide
2178 *   op    - instruction mnemonic
2179 *   nels  - number of elements (1, 2 or 4)
2180 *   tp    - type (float32 or float64)
2181 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2182 *   emin  - minimum unbiased exponent
2183 *   emax  - maximum unbiased exponent
2184 *   nbits - number of fraction bits
2185 */
#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                  \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xa, xb;                                                   \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    getVSR(xA(opcode), &xa, env);                                       \
    getVSR(xB(opcode), &xb, env);                                       \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_infinity(xa.fld) ||                        \
                     tp##_is_infinity(xb.fld) ||                        \
                     tp##_is_zero(xb.fld))) {                           \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_a = ppc_##tp##_get_unbiased_exp(xa.fld);              \
            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);              \
                                                                        \
            /* NaN operands or exponents near the representable     */  \
            /* limits make the software-divide path necessary.      */  \
            if (unlikely(tp##_is_any_nan(xa.fld) ||                     \
                         tp##_is_any_nan(xb.fld))) {                    \
                fe_flag = 1;                                            \
            } else if ((e_b <= emin) || (e_b >= (emax-2))) {            \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xa.fld) &&                         \
                       (((e_a - e_b) >= emax) ||                        \
                        ((e_a - e_b) <= (emin+1)) ||                    \
                         (e_a <= (emin+nbits)))) {                      \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {           \
                /* XB is not zero because of the above check and */     \
                /* so must be denormalized.                      */     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    /* CR field: 0b1000 | FG << 2 | FE << 1.                        */  \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
2233
/* VSX_TSQRT - VSX floating point test for square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   nbits - number of fraction bits
 */
2243#define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                       \
2244void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
2245{                                                                       \
2246    ppc_vsr_t xa, xb;                                                   \
2247    int i;                                                              \
2248    int fe_flag = 0;                                                    \
2249    int fg_flag = 0;                                                    \
2250                                                                        \
2251    getVSR(xA(opcode), &xa, env);                                       \
2252    getVSR(xB(opcode), &xb, env);                                       \
2253                                                                        \
2254    for (i = 0; i < nels; i++) {                                        \
2255        if (unlikely(tp##_is_infinity(xb.fld) ||                        \
2256                     tp##_is_zero(xb.fld))) {                           \
2257            fe_flag = 1;                                                \
2258            fg_flag = 1;                                                \
2259        } else {                                                        \
2260            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);              \
2261                                                                        \
2262            if (unlikely(tp##_is_any_nan(xb.fld))) {                    \
2263                fe_flag = 1;                                            \
2264            } else if (unlikely(tp##_is_zero(xb.fld))) {                \
2265                fe_flag = 1;                                            \
2266            } else if (unlikely(tp##_is_neg(xb.fld))) {                 \
2267                fe_flag = 1;                                            \
2268            } else if (!tp##_is_zero(xb.fld) &&                         \
2269                      (e_b <= (emin+nbits))) {                          \
2270                fe_flag = 1;                                            \
2271            }                                                           \
2272                                                                        \
2273            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {           \
2274                /* XB is not zero because of the above check and */     \
2275                /* therefore must be denormalized.               */     \
2276                fg_flag = 1;                                            \
2277            }                                                           \
2278        }                                                               \
2279    }                                                                   \
2280                                                                        \
2281    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
2282}
2283
2284VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
2285VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
2286VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
2287
/* VSX_MADD - VSX floating point multiply/add variations
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   maddflgs - flags for the float*muladd routine that control the
 *           various forms (madd, msub, nmadd, nmsub)
 *   afrm  - A form (1=A, 0=M)
 *   sfprf - set FPRF
 *   r2sp  - round the result to single precision
 */
#define VSX_MADD(op, nels, tp, fld, maddflgs, afrm, sfprf, r2sp)              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
{                                                                             \
    ppc_vsr_t xt_in, xa, xb, xt_out;                                          \
    ppc_vsr_t *b, *c;                                                         \
    int i;                                                                    \
                                                                              \
    if (afrm) { /* AxB + T */                                                 \
        b = &xb;                                                              \
        c = &xt_in;                                                           \
    } else { /* AxT + B */                                                    \
        b = &xt_in;                                                           \
        c = &xb;                                                              \
    }                                                                         \
                                                                              \
    getVSR(xA(opcode), &xa, env);                                             \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt_in, env);                                          \
                                                                              \
    xt_out = xt_in;                                                           \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        float_status tstat = env->fp_status;                                  \
        set_float_exception_flags(0, &tstat);                                 \
        if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
            /* Avoid double rounding errors by rounding the intermediate */   \
            /* result to odd.                                            */   \
            set_float_rounding_mode(float_round_to_zero, &tstat);             \
            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                  \
                                       maddflgs, &tstat);                     \
            /* Round-to-odd: OR the inexact flag into the truncated     */    \
            /* result's least-significant bit as a sticky bit.          */    \
            xt_out.fld |= (get_float_exception_flags(&tstat) &                \
                              float_flag_inexact) != 0;                       \
        } else {                                                              \
            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                  \
                                        maddflgs, &tstat);                    \
        }                                                                     \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
                                                                              \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
            tp##_maddsub_update_excp(env, xa.fld, b->fld,                     \
                                     c->fld, maddflgs, GETPC());              \
        }                                                                     \
                                                                              \
        if (r2sp) {                                                           \
            xt_out.fld = helper_frsp(env, xt_out.fld);                        \
        }                                                                     \
                                                                              \
        if (sfprf) {                                                          \
            helper_compute_fprf_float64(env, xt_out.fld);                     \
        }                                                                     \
    }                                                                         \
    putVSR(xT(opcode), &xt_out, env);                                         \
    do_float_check_status(env, GETPC());                                      \
}
2354
/*
 * Scalar double-precision multiply-add/subtract: one float64 element in
 * VSR[xT] doubleword 0, FPRF updated (sfprf == 1).
 */
VSX_MADD(xsmaddadp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 0)
VSX_MADD(xsmaddmdp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 0)
VSX_MADD(xsmsubadp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 0)
VSX_MADD(xsmsubmdp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 0)
VSX_MADD(xsnmaddadp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 0)
VSX_MADD(xsnmaddmdp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 0)
VSX_MADD(xsnmsubadp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 0)
VSX_MADD(xsnmsubmdp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 0)

/*
 * Scalar single-precision variants: computed as float64 with the
 * round-to-odd double-rounding workaround in the macro body, then
 * rounded to single precision via helper_frsp (r2sp == 1).
 */
VSX_MADD(xsmaddasp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 1)
VSX_MADD(xsmaddmsp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 1)
VSX_MADD(xsmsubasp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 1)
VSX_MADD(xsmsubmsp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 1)
VSX_MADD(xsnmaddasp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 1)
VSX_MADD(xsnmaddmsp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 1)
VSX_MADD(xsnmsubasp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 1)
VSX_MADD(xsnmsubmsp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 1)

/* Vector double-precision variants: two float64 elements, FPRF untouched. */
VSX_MADD(xvmaddadp, 2, float64, VsrD(i), MADD_FLGS, 1, 0, 0)
VSX_MADD(xvmaddmdp, 2, float64, VsrD(i), MADD_FLGS, 0, 0, 0)
VSX_MADD(xvmsubadp, 2, float64, VsrD(i), MSUB_FLGS, 1, 0, 0)
VSX_MADD(xvmsubmdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0, 0)
VSX_MADD(xvnmaddadp, 2, float64, VsrD(i), NMADD_FLGS, 1, 0, 0)
VSX_MADD(xvnmaddmdp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0, 0)
VSX_MADD(xvnmsubadp, 2, float64, VsrD(i), NMSUB_FLGS, 1, 0, 0)
VSX_MADD(xvnmsubmdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0, 0)

/* Vector single-precision variants: four float32 elements, FPRF untouched. */
VSX_MADD(xvmaddasp, 4, float32, VsrW(i), MADD_FLGS, 1, 0, 0)
VSX_MADD(xvmaddmsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0, 0)
VSX_MADD(xvmsubasp, 4, float32, VsrW(i), MSUB_FLGS, 1, 0, 0)
VSX_MADD(xvmsubmsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0, 0)
VSX_MADD(xvnmaddasp, 4, float32, VsrW(i), NMADD_FLGS, 1, 0, 0)
VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
2390
/* VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision,
 * producing an all-ones (true) / all-zeroes (false) doubleword mask in xT
 *   op    - instruction mnemonic
 *   cmp   - softfloat comparison operation (eq, le or lt)
 *   exp   - expected result of comparison (0 inverts the test, cf. xscmpnedp)
 *   svxvc - set VXVC bit on invalid (NaN) compares
 */
#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc)                                \
void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false;            \
                                                                              \
    getVSR(xA(opcode), &xa, env);                                             \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
                                                                              \
    /* A signaling NaN always raises VXSNAN; for the svxvc compares it  */    \
    /* also raises VXVC when invalid exceptions are disabled (VE == 0). */    \
    if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||              \
        float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {              \
        vxsnan_flag = true;                                                   \
        if (fpscr_ve == 0 && svxvc) {                                         \
            vxvc_flag = true;                                                 \
        }                                                                     \
    } else if (svxvc) {                                                       \
        /* Quiet NaNs make the svxvc compares invalid as well. */             \
        vxvc_flag = float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) ||      \
            float64_is_quiet_nan(xb.VsrD(0), &env->fp_status);                \
    }                                                                         \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_vxsnan(env, GETPC());                                \
    }                                                                         \
    if (vxvc_flag) {                                                          \
        float_invalid_op_vxvc(env, 0, GETPC());                               \
    }                                                                         \
    vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag);                        \
                                                                              \
    /* With an enabled invalid exception the target stays unmodified. */      \
    if (!vex_flag) {                                                          \
        /* Note the swapped operands: "xa cmp xb" is evaluated as       */    \
        /* "xb cmp xa" (e.g. xa >= xb  <=>  xb <= xa).                  */    \
        if (float64_##cmp(xb.VsrD(0), xa.VsrD(0), &env->fp_status) == exp) {  \
            xt.VsrD(0) = -1;                                                  \
            xt.VsrD(1) = 0;                                                   \
        } else {                                                              \
            xt.VsrD(0) = 0;                                                   \
            xt.VsrD(1) = 0;                                                   \
        }                                                                     \
    }                                                                         \
    putVSR(xT(opcode), &xt, env);                                             \
    do_float_check_status(env, GETPC());                                      \
}

VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)
2442
2443void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode)
2444{
2445    ppc_vsr_t xa, xb;
2446    int64_t exp_a, exp_b;
2447    uint32_t cc;
2448
2449    getVSR(xA(opcode), &xa, env);
2450    getVSR(xB(opcode), &xb, env);
2451
2452    exp_a = extract64(xa.VsrD(0), 52, 11);
2453    exp_b = extract64(xb.VsrD(0), 52, 11);
2454
2455    if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||
2456                 float64_is_any_nan(xb.VsrD(0)))) {
2457        cc = CRF_SO;
2458    } else {
2459        if (exp_a < exp_b) {
2460            cc = CRF_LT;
2461        } else if (exp_a > exp_b) {
2462            cc = CRF_GT;
2463        } else {
2464            cc = CRF_EQ;
2465        }
2466    }
2467
2468    env->fpscr &= ~(0x0F << FPSCR_FPRF);
2469    env->fpscr |= cc << FPSCR_FPRF;
2470    env->crf[BF(opcode)] = cc;
2471
2472    do_float_check_status(env, GETPC());
2473}
2474
2475void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode)
2476{
2477    ppc_vsr_t xa, xb;
2478    int64_t exp_a, exp_b;
2479    uint32_t cc;
2480
2481    getVSR(rA(opcode) + 32, &xa, env);
2482    getVSR(rB(opcode) + 32, &xb, env);
2483
2484    exp_a = extract64(xa.VsrD(0), 48, 15);
2485    exp_b = extract64(xb.VsrD(0), 48, 15);
2486
2487    if (unlikely(float128_is_any_nan(xa.f128) ||
2488                 float128_is_any_nan(xb.f128))) {
2489        cc = CRF_SO;
2490    } else {
2491        if (exp_a < exp_b) {
2492            cc = CRF_LT;
2493        } else if (exp_a > exp_b) {
2494            cc = CRF_GT;
2495        } else {
2496            cc = CRF_EQ;
2497        }
2498    }
2499
2500    env->fpscr &= ~(0x0F << FPSCR_FPRF);
2501    env->fpscr |= cc << FPSCR_FPRF;
2502    env->crf[BF(opcode)] = cc;
2503
2504    do_float_check_status(env, GETPC());
2505}
2506
2507#define VSX_SCALAR_CMP(op, ordered)                                      \
2508void helper_##op(CPUPPCState *env, uint32_t opcode)                      \
2509{                                                                        \
2510    ppc_vsr_t xa, xb;                                                    \
2511    uint32_t cc = 0;                                                     \
2512    bool vxsnan_flag = false, vxvc_flag = false;                         \
2513                                                                         \
2514    helper_reset_fpstatus(env);                                          \
2515    getVSR(xA(opcode), &xa, env);                                        \
2516    getVSR(xB(opcode), &xb, env);                                        \
2517                                                                         \
2518    if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||         \
2519        float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {         \
2520        vxsnan_flag = true;                                              \
2521        cc = CRF_SO;                                                     \
2522        if (fpscr_ve == 0 && ordered) {                                  \
2523            vxvc_flag = true;                                            \
2524        }                                                                \
2525    } else if (float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) ||      \
2526               float64_is_quiet_nan(xb.VsrD(0), &env->fp_status)) {      \
2527        cc = CRF_SO;                                                     \
2528        if (ordered) {                                                   \
2529            vxvc_flag = true;                                            \
2530        }                                                                \
2531    }                                                                    \
2532    if (vxsnan_flag) {                                                   \
2533        float_invalid_op_vxsnan(env, GETPC());                           \
2534    }                                                                    \
2535    if (vxvc_flag) {                                                     \
2536        float_invalid_op_vxvc(env, 0, GETPC());                          \
2537    }                                                                    \
2538                                                                         \
2539    if (float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {           \
2540        cc |= CRF_LT;                                                    \
2541    } else if (!float64_le(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {   \
2542        cc |= CRF_GT;                                                    \
2543    } else {                                                             \
2544        cc |= CRF_EQ;                                                    \
2545    }                                                                    \
2546                                                                         \
2547    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                 \
2548    env->fpscr |= cc << FPSCR_FPRF;                                      \
2549    env->crf[BF(opcode)] = cc;                                           \
2550                                                                         \
2551    do_float_check_status(env, GETPC());                                 \
2552}
2553
2554VSX_SCALAR_CMP(xscmpodp, 1)
2555VSX_SCALAR_CMP(xscmpudp, 0)
2556
2557#define VSX_SCALAR_CMPQ(op, ordered)                                    \
2558void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
2559{                                                                       \
2560    ppc_vsr_t xa, xb;                                                   \
2561    uint32_t cc = 0;                                                    \
2562    bool vxsnan_flag = false, vxvc_flag = false;                        \
2563                                                                        \
2564    helper_reset_fpstatus(env);                                         \
2565    getVSR(rA(opcode) + 32, &xa, env);                                  \
2566    getVSR(rB(opcode) + 32, &xb, env);                                  \
2567                                                                        \
2568    if (float128_is_signaling_nan(xa.f128, &env->fp_status) ||          \
2569        float128_is_signaling_nan(xb.f128, &env->fp_status)) {          \
2570        vxsnan_flag = true;                                             \
2571        cc = CRF_SO;                                                    \
2572        if (fpscr_ve == 0 && ordered) {                                 \
2573            vxvc_flag = true;                                           \
2574        }                                                               \
2575    } else if (float128_is_quiet_nan(xa.f128, &env->fp_status) ||       \
2576               float128_is_quiet_nan(xb.f128, &env->fp_status)) {       \
2577        cc = CRF_SO;                                                    \
2578        if (ordered) {                                                  \
2579            vxvc_flag = true;                                           \
2580        }                                                               \
2581    }                                                                   \
2582    if (vxsnan_flag) {                                                  \
2583        float_invalid_op_vxsnan(env, GETPC());                          \
2584    }                                                                   \
2585    if (vxvc_flag) {                                                    \
2586        float_invalid_op_vxvc(env, 0, GETPC());                         \
2587    }                                                                   \
2588                                                                        \
2589    if (float128_lt(xa.f128, xb.f128, &env->fp_status)) {               \
2590        cc |= CRF_LT;                                                   \
2591    } else if (!float128_le(xa.f128, xb.f128, &env->fp_status)) {       \
2592        cc |= CRF_GT;                                                   \
2593    } else {                                                            \
2594        cc |= CRF_EQ;                                                   \
2595    }                                                                   \
2596                                                                        \
2597    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                \
2598    env->fpscr |= cc << FPSCR_FPRF;                                     \
2599    env->crf[BF(opcode)] = cc;                                          \
2600                                                                        \
2601    do_float_check_status(env, GETPC());                                \
2602}
2603
2604VSX_SCALAR_CMPQ(xscmpoqp, 1)
2605VSX_SCALAR_CMPQ(xscmpuqp, 0)
2606
/* VSX_MAX_MIN - VSX floating point maximum/minimum
 *   name  - instruction mnemonic
 *   op    - operation (max or min, expanded to tp##_maxnum / tp##_minnum)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*)); may textually reference
 *           the loop index "i", so the loop variable name is load-bearing
 */
#define VSX_MAX_MIN(name, op, nels, tp, fld)                                  \
void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    int i;                                                                    \
                                                                              \
    getVSR(xA(opcode), &xa, env);                                             \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        /* maxnum/minnum per IEEE 754-2008: a single quiet NaN operand */     \
        /* loses to the numeric operand.                               */     \
        xt.fld = tp##_##op(xa.fld, xb.fld, &env->fp_status);                  \
        if (unlikely(tp##_is_signaling_nan(xa.fld, &env->fp_status) ||        \
                     tp##_is_signaling_nan(xb.fld, &env->fp_status))) {       \
            float_invalid_op_vxsnan(env, GETPC());                            \
        }                                                                     \
    }                                                                         \
                                                                              \
    putVSR(xT(opcode), &xt, env);                                             \
    do_float_check_status(env, GETPC());                                      \
}

VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
2642
/* VSX_MAX_MINC - VSX scalar maximum/minimum type-C (xsmaxcdp/xsmincdp)
 *   name - instruction mnemonic
 *   max  - 1 for maximum, 0 for minimum
 * Operands and result live in VSRs 32..63 (rA/rB/rD + 32).  If either
 * operand is a NaN, the second operand (xb) is selected.
 */
#define VSX_MAX_MINC(name, max)                                               \
void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    bool vxsnan_flag = false, vex_flag = false;                               \
                                                                              \
    getVSR(rA(opcode) + 32, &xa, env);                                        \
    getVSR(rB(opcode) + 32, &xb, env);                                        \
    getVSR(rD(opcode) + 32, &xt, env);                                        \
                                                                              \
    if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||                            \
                 float64_is_any_nan(xb.VsrD(0)))) {                           \
        /* Only a signaling NaN raises VXSNAN; either NaN selects xb. */      \
        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||          \
            float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {          \
            vxsnan_flag = true;                                               \
        }                                                                     \
        xt.VsrD(0) = xb.VsrD(0);                                              \
    } else if ((max &&                                                        \
               !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) ||       \
               (!max &&                                                       \
               float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) {        \
        xt.VsrD(0) = xa.VsrD(0);                                              \
    } else {                                                                  \
        xt.VsrD(0) = xb.VsrD(0);                                              \
    }                                                                         \
                                                                              \
    /* NOTE: relies on fpscr_ve being 0 or 1 (bitwise & as logical AND). */   \
    vex_flag = fpscr_ve & vxsnan_flag;                                        \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_vxsnan(env, GETPC());                                \
    }                                                                         \
    /* On an enabled invalid-op exception the target is not written. */       \
    if (!vex_flag) {                                                          \
        putVSR(rD(opcode) + 32, &xt, env);                                    \
    }                                                                         \
}                                                                             \

VSX_MAX_MINC(xsmaxcdp, 1);
VSX_MAX_MINC(xsmincdp, 0);
2680
/* VSX_MAX_MINJ - VSX scalar maximum/minimum type-J (xsmaxjdp/xsminjdp)
 *   name - instruction mnemonic
 *   max  - 1 for maximum, 0 for minimum
 * Operands and result live in VSRs 32..63 (rA/rB/rD + 32).  Unlike the
 * type-C form, a NaN in xa takes precedence over one in xb, and signed
 * zeroes are distinguished (+0 is treated as larger than -0).
 */
#define VSX_MAX_MINJ(name, max)                                               \
void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    bool vxsnan_flag = false, vex_flag = false;                               \
                                                                              \
    getVSR(rA(opcode) + 32, &xa, env);                                        \
    getVSR(rB(opcode) + 32, &xb, env);                                        \
    getVSR(rD(opcode) + 32, &xt, env);                                        \
                                                                              \
    if (unlikely(float64_is_any_nan(xa.VsrD(0)))) {                           \
        /* A NaN in xa wins over a NaN in xb. */                              \
        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status)) {          \
            vxsnan_flag = true;                                               \
        }                                                                     \
        xt.VsrD(0) = xa.VsrD(0);                                              \
    } else if (unlikely(float64_is_any_nan(xb.VsrD(0)))) {                    \
        if (float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {          \
            vxsnan_flag = true;                                               \
        }                                                                     \
        xt.VsrD(0) = xb.VsrD(0);                                              \
    } else if (float64_is_zero(xa.VsrD(0)) && float64_is_zero(xb.VsrD(0))) {  \
        /* float64_lt() cannot separate -0.0 from +0.0, so zero pairs  */     \
        /* are handled explicitly: max picks +0 if present, min -0.    */     \
        if (max) {                                                            \
            if (!float64_is_neg(xa.VsrD(0)) || !float64_is_neg(xb.VsrD(0))) { \
                xt.VsrD(0) = 0ULL;                                            \
            } else {                                                          \
                xt.VsrD(0) = 0x8000000000000000ULL;                           \
            }                                                                 \
        } else {                                                              \
            if (float64_is_neg(xa.VsrD(0)) || float64_is_neg(xb.VsrD(0))) {   \
                xt.VsrD(0) = 0x8000000000000000ULL;                           \
            } else {                                                          \
                xt.VsrD(0) = 0ULL;                                            \
            }                                                                 \
        }                                                                     \
    } else if ((max &&                                                        \
               !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) ||       \
               (!max &&                                                       \
               float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) {        \
        xt.VsrD(0) = xa.VsrD(0);                                              \
    } else {                                                                  \
        xt.VsrD(0) = xb.VsrD(0);                                              \
    }                                                                         \
                                                                              \
    /* NOTE: relies on fpscr_ve being 0 or 1 (bitwise & as logical AND). */   \
    vex_flag = fpscr_ve & vxsnan_flag;                                        \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_vxsnan(env, GETPC());                                \
    }                                                                         \
    /* On an enabled invalid-op exception the target is not written. */       \
    if (!vex_flag) {                                                          \
        putVSR(rD(opcode) + 32, &xt, env);                                    \
    }                                                                         \
}                                                                             \

VSX_MAX_MINJ(xsmaxjdp, 1);
VSX_MAX_MINJ(xsminjdp, 0);
2735
/* VSX_CMP - VSX floating point compare, producing a per-element
 * all-ones/all-zeroes mask in xT
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*)); may textually reference
 *           the loop index "i"
 *   cmp   - comparison operation
 *   svxvc - set VXVC bit
 *   exp   - expected result of comparison (0 inverts the test)
 */
#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp)                       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                       \
{                                                                         \
    ppc_vsr_t xt, xa, xb;                                                 \
    int i;                                                                \
    int all_true = 1;                                                     \
    int all_false = 1;                                                    \
                                                                          \
    getVSR(xA(opcode), &xa, env);                                         \
    getVSR(xB(opcode), &xb, env);                                         \
    getVSR(xT(opcode), &xt, env);                                         \
                                                                          \
    for (i = 0; i < nels; i++) {                                          \
        if (unlikely(tp##_is_any_nan(xa.fld) ||                           \
                     tp##_is_any_nan(xb.fld))) {                          \
            if (tp##_is_signaling_nan(xa.fld, &env->fp_status) ||         \
                tp##_is_signaling_nan(xb.fld, &env->fp_status)) {         \
                float_invalid_op_vxsnan(env, GETPC());                    \
            }                                                             \
            if (svxvc) {                                                  \
                float_invalid_op_vxvc(env, 0, GETPC());                   \
            }                                                             \
            /* NaN elements always produce a false (zero) mask. */        \
            xt.fld = 0;                                                   \
            all_true = 0;                                                 \
        } else {                                                          \
            /* Operands swapped: "xa cmp xb" evaluated as "xb cmp xa". */ \
            if (tp##_##cmp(xb.fld, xa.fld, &env->fp_status) == exp) {     \
                xt.fld = -1;                                              \
                all_false = 0;                                            \
            } else {                                                      \
                xt.fld = 0;                                               \
                all_true = 0;                                             \
            }                                                             \
        }                                                                 \
    }                                                                     \
                                                                          \
    putVSR(xT(opcode), &xt, env);                                         \
    /* Rc=1 (opcode bit 21): summarize all-true/all-false into CR6. */    \
    if ((opcode >> (31-21)) & 1) {                                        \
        env->crf[6] = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);       \
    }                                                                     \
    do_float_check_status(env, GETPC());                                  \
 }

VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
2795
/* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field (may textually reference the loop index "i")
 *   tfld  - target vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf)    \
void helper_##op(CPUPPCState *env, uint32_t opcode)                \
{                                                                  \
    ppc_vsr_t xt, xb;                                              \
    int i;                                                         \
                                                                   \
    getVSR(xB(opcode), &xb, env);                                  \
    getVSR(xT(opcode), &xt, env);                                  \
                                                                   \
    for (i = 0; i < nels; i++) {                                   \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);        \
        /* A signaling NaN input raises VXSNAN and the result  */  \
        /* is re-quieted in the target format.                 */  \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,               \
                                            &env->fp_status))) {   \
            float_invalid_op_vxsnan(env, GETPC());                 \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                 \
        }                                                          \
        if (sfprf) {                                               \
            helper_compute_fprf_##ttp(env, xt.tfld);               \
        }                                                          \
    }                                                              \
                                                                   \
    putVSR(xT(opcode), &xt, env);                                  \
    do_float_check_status(env, GETPC());                           \
}

VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
/* The vector forms keep float32 elements in the even word slots. */
VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2*i), 0)
VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2*i), VsrD(i), 0)
2834
/* VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
 * operating on VSRs 32..63 (rB/rD + 32), used for the quad-precision forms
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf)    \
void helper_##op(CPUPPCState *env, uint32_t opcode)                       \
{                                                                       \
    ppc_vsr_t xt, xb;                                                   \
    int i;                                                              \
                                                                        \
    getVSR(rB(opcode) + 32, &xb, env);                                  \
    getVSR(rD(opcode) + 32, &xt, env);                                  \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);             \
        /* A signaling NaN input raises VXSNAN and the result is  */    \
        /* re-quieted in the target format.                       */    \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,                    \
                                            &env->fp_status))) {        \
            float_invalid_op_vxsnan(env, GETPC());                      \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                      \
        }                                                               \
        if (sfprf) {                                                    \
            helper_compute_fprf_##ttp(env, xt.tfld);                    \
        }                                                               \
    }                                                                   \
                                                                        \
    putVSR(rD(opcode) + 32, &xt, env);                                  \
    do_float_check_status(env, GETPC());                                \
}

VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
2870
2871/* VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
2872 *                       involving one half precision value
2873 *   op    - instruction mnemonic
2874 *   nels  - number of elements (1, 2 or 4)
2875 *   stp   - source type
2876 *   ttp   - target type
2877 *   sfld  - source vsr_t field
2878 *   tfld  - target vsr_t field
2879 *   sfprf - set FPRF
2880 */
/*
 * Conversions to/from half precision.  The target register is cleared
 * first, so elements not written by the loop read as zero.  The extra
 * "1" argument to the conversion routine is softfloat's "ieee" flag,
 * selecting IEEE 754 half-precision interpretation (as opposed to the
 * ARM alternative format).
 */
#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf) \
void helper_##op(CPUPPCState *env, uint32_t opcode)                \
{                                                                  \
    ppc_vsr_t xt, xb;                                              \
    int i;                                                         \
                                                                   \
    getVSR(xB(opcode), &xb, env);                                  \
    memset(&xt, 0, sizeof(xt));                                    \
                                                                   \
    for (i = 0; i < nels; i++) {                                   \
        xt.tfld = stp##_to_##ttp(xb.sfld, 1, &env->fp_status);     \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,               \
                                            &env->fp_status))) {   \
            float_invalid_op_vxsnan(env, GETPC());                 \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                 \
        }                                                          \
        if (sfprf) {                                               \
            helper_compute_fprf_##ttp(env, xt.tfld);               \
        }                                                          \
    }                                                              \
                                                                   \
    putVSR(xT(opcode), &xt, env);                                  \
    do_float_check_status(env, GETPC());                           \
}

VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i  + 1), 0)
VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
2910
2911/*
2912 * xscvqpdp isn't using VSX_CVT_FP_TO_FP() because xscvqpdpo will be
2913 * added to this later.
2914 */
2915void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode)
2916{
2917    ppc_vsr_t xt, xb;
2918    float_status tstat;
2919
2920    getVSR(rB(opcode) + 32, &xb, env);
2921    memset(&xt, 0, sizeof(xt));
2922
2923    tstat = env->fp_status;
2924    if (unlikely(Rc(opcode) != 0)) {
2925        tstat.float_rounding_mode = float_round_to_odd;
2926    }
2927
2928    xt.VsrD(0) = float128_to_float64(xb.f128, &tstat);
2929    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
2930    if (unlikely(float128_is_signaling_nan(xb.f128, &tstat))) {
2931        float_invalid_op_vxsnan(env, GETPC());
2932        xt.VsrD(0) = float64_snan_to_qnan(xt.VsrD(0));
2933    }
2934    helper_compute_fprf_float64(env, xt.VsrD(0));
2935
2936    putVSR(rD(opcode) + 32, &xt, env);
2937    do_float_check_status(env, GETPC());
2938}
2939
2940uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
2941{
2942    float_status tstat = env->fp_status;
2943    set_float_exception_flags(0, &tstat);
2944
2945    return (uint64_t)float64_to_float32(xb, &tstat) << 32;
2946}
2947
2948uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
2949{
2950    float_status tstat = env->fp_status;
2951    set_float_exception_flags(0, &tstat);
2952
2953    return float32_to_float64(xb >> 32, &tstat);
2954}
2955
2956/* VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
2957 *   op    - instruction mnemonic
2958 *   nels  - number of elements (1, 2 or 4)
2959 *   stp   - source type (float32 or float64)
2960 *   ttp   - target type (int32, uint32, int64 or uint64)
2961 *   sfld  - source vsr_t field
2962 *   tfld  - target vsr_t field
2963 *   rnan  - resulting NaN
2964 */
/*
 * Convert each element with round-to-zero (truncation).  The exception
 * flags are cleared before each element so that a per-element invalid
 * can be detected; flags from all elements are accumulated in all_flags
 * (seeded with the pre-existing flags) and restored at the end.  An
 * invalid conversion (NaN or out-of-range source) substitutes the
 * saturated/default value rnan.
 */
#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan)              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    int all_flags = env->fp_status.float_exception_flags, flags;             \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        env->fp_status.float_exception_flags = 0;                            \
        xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld, &env->fp_status);  \
        flags = env->fp_status.float_exception_flags;                        \
        if (unlikely(flags & float_flag_invalid)) {                          \
            float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb.sfld));     \
            xt.tfld = rnan;                                                  \
        }                                                                    \
        all_flags |= flags;                                                  \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    env->fp_status.float_exception_flags = all_flags;                        \
    do_float_check_status(env, GETPC());                                     \
}

VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2*i), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2*i), 0U)
VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2*i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2*i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
3008
3009/* VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
3010 *   op    - instruction mnemonic
3011 *   stp   - source type (float32 or float64)
3012 *   ttp   - target type (int32, uint32, int64 or uint64)
3013 *   sfld  - source vsr_t field
3014 *   tfld  - target vsr_t field
3015 *   rnan  - resulting NaN
3016 */
/*
 * Scalar quad-precision to integer conversions; operands live in
 * VSRs 32-63.  The target is zeroed first, so the 64 bits not covered
 * by tfld read as zero (xscvqpswz sign-extends its rnan into VsrD(0)).
 *
 * NOTE(review): unlike VSX_CVT_FP_TO_INT this does not clear
 * float_exception_flags before converting, so a flag left set by an
 * earlier operation could spuriously trigger the rnan substitution --
 * TODO confirm the translator resets the status first.
 */
#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan)             \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
                                                                             \
    getVSR(rB(opcode) + 32, &xb, env);                                       \
    memset(&xt, 0, sizeof(xt));                                              \
                                                                             \
    xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld, &env->fp_status);      \
    if (env->fp_status.float_exception_flags & float_flag_invalid) {         \
        float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb.sfld));         \
        xt.tfld = rnan;                                                      \
    }                                                                        \
                                                                             \
    putVSR(rD(opcode) + 32, &xt, env);                                       \
    do_float_check_status(env, GETPC());                                     \
}

VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0),          \
                  0x8000000000000000ULL)

VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0),          \
                  0xffffffff80000000ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
3042
/* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (int32, uint32, int64 or uint64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfprf - set FPRF
 *   r2sp  - round the result to single precision
 */
/*
 * Integer to FP conversion.  With r2sp set, the double-precision result
 * is additionally rounded to single precision via helper_frsp() (still
 * held in DP format).  helper_compute_fprf_float64() is hard-coded; that
 * is safe because every instantiation with sfprf = 1 targets float64.
 */
#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp)  \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xt, xb;                                                   \
    int i;                                                              \
                                                                        \
    getVSR(xB(opcode), &xb, env);                                       \
    getVSR(xT(opcode), &xt, env);                                       \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);             \
        if (r2sp) {                                                     \
            xt.tfld = helper_frsp(env, xt.tfld);                        \
        }                                                               \
        if (sfprf) {                                                    \
            helper_compute_fprf_float64(env, xt.tfld);                  \
        }                                                               \
    }                                                                   \
                                                                        \
    putVSR(xT(opcode), &xt, env);                                       \
    do_float_check_status(env, GETPC());                                \
}

/* Passing a 32-bit VsrW field to a uint64 conversion (xvcvuxwdp,
 * xvcvuxdsp's sibling below) relies on the implicit zero-extension of
 * the argument to the conversion routine's parameter type. */
VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2*i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2*i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2*i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2*i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
3088
3089/* VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
3090 *   op    - instruction mnemonic
3091 *   stp   - source type (int32, uint32, int64 or uint64)
3092 *   ttp   - target type (float32 or float64)
3093 *   sfld  - source vsr_t field
3094 *   tfld  - target vsr_t field
3095 */
3096#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld)              \
3097void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
3098{                                                                       \
3099    ppc_vsr_t xt, xb;                                                   \
3100                                                                        \
3101    getVSR(rB(opcode) + 32, &xb, env);                                  \
3102    getVSR(rD(opcode) + 32, &xt, env);                                  \
3103                                                                        \
3104    xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);                 \
3105    helper_compute_fprf_##ttp(env, xt.tfld);                            \
3106                                                                        \
3107    putVSR(xT(opcode) + 32, &xt, env);                                  \
3108    do_float_check_status(env, GETPC());                                \
3109}
3110
3111VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
3112VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
3113
/* For "use current rounding mode", define a value that will not be one of
 * the existing rounding model enums (the sum of the four basic
 * float_round_* values cannot collide with any single one of them).
 */
#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
  float_round_up + float_round_to_zero)
3119
3120/* VSX_ROUND - VSX floating point round
3121 *   op    - instruction mnemonic
3122 *   nels  - number of elements (1, 2 or 4)
3123 *   tp    - type (float32 or float64)
3124 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
3125 *   rmode - rounding mode
3126 *   sfprf - set FPRF
3127 */
/*
 * Round each element to an integral FP value.  An SNaN operand raises
 * VXSNAN and is propagated quieted, bypassing the rounding step.  For
 * the fixed-mode variants the rounding mode is installed up front and
 * the FPSCR mode restored afterwards, with the inexact flag suppressed
 * (these instructions do not set XX).  helper_compute_fprf_float64()
 * is hard-coded, which is safe: all sfprf = 1 instantiations use
 * float64.
 */
#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf)                     \
void helper_##op(CPUPPCState *env, uint32_t opcode)                    \
{                                                                      \
    ppc_vsr_t xt, xb;                                                  \
    int i;                                                             \
    getVSR(xB(opcode), &xb, env);                                      \
    getVSR(xT(opcode), &xt, env);                                      \
                                                                       \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        set_float_rounding_mode(rmode, &env->fp_status);               \
    }                                                                  \
                                                                       \
    for (i = 0; i < nels; i++) {                                       \
        if (unlikely(tp##_is_signaling_nan(xb.fld,                     \
                                           &env->fp_status))) {        \
            float_invalid_op_vxsnan(env, GETPC());                     \
            xt.fld = tp##_snan_to_qnan(xb.fld);                        \
        } else {                                                       \
            xt.fld = tp##_round_to_int(xb.fld, &env->fp_status);       \
        }                                                              \
        if (sfprf) {                                                   \
            helper_compute_fprf_float64(env, xt.fld);                  \
        }                                                              \
    }                                                                  \
                                                                       \
    /* If this is not a "use current rounding mode" instruction,       \
     * then inhibit setting of the XX bit and restore rounding         \
     * mode from FPSCR */                                              \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        fpscr_set_rounding_mode(env);                                  \
        env->fp_status.float_exception_flags &= ~float_flag_inexact;   \
    }                                                                  \
                                                                       \
    putVSR(xT(opcode), &xt, env);                                      \
    do_float_check_status(env, GETPC());                               \
}

VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)

VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)

VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
3182
3183uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
3184{
3185    helper_reset_fpstatus(env);
3186
3187    uint64_t xt = helper_frsp(env, xb);
3188
3189    helper_compute_fprf_float64(env, xt);
3190    do_float_check_status(env, GETPC());
3191    return xt;
3192}
3193
3194#define VSX_XXPERM(op, indexed)                                       \
3195void helper_##op(CPUPPCState *env, uint32_t opcode)                   \
3196{                                                                     \
3197    ppc_vsr_t xt, xa, pcv, xto;                                       \
3198    int i, idx;                                                       \
3199                                                                      \
3200    getVSR(xA(opcode), &xa, env);                                     \
3201    getVSR(xT(opcode), &xt, env);                                     \
3202    getVSR(xB(opcode), &pcv, env);                                    \
3203                                                                      \
3204    for (i = 0; i < 16; i++) {                                        \
3205        idx = pcv.VsrB(i) & 0x1F;                                     \
3206        if (indexed) {                                                \
3207            idx = 31 - idx;                                           \
3208        }                                                             \
3209        xto.VsrB(i) = (idx <= 15) ? xa.VsrB(idx) : xt.VsrB(idx - 16); \
3210    }                                                                 \
3211    putVSR(xT(opcode), &xto, env);                                    \
3212}
3213
3214VSX_XXPERM(xxperm, 0)
3215VSX_XXPERM(xxpermr, 1)
3216
3217void helper_xvxsigsp(CPUPPCState *env, uint32_t opcode)
3218{
3219    ppc_vsr_t xt, xb;
3220    uint32_t exp, i, fraction;
3221
3222    getVSR(xB(opcode), &xb, env);
3223    memset(&xt, 0, sizeof(xt));
3224
3225    for (i = 0; i < 4; i++) {
3226        exp = (xb.VsrW(i) >> 23) & 0xFF;
3227        fraction = xb.VsrW(i) & 0x7FFFFF;
3228        if (exp != 0 && exp != 255) {
3229            xt.VsrW(i) = fraction | 0x00800000;
3230        } else {
3231            xt.VsrW(i) = fraction;
3232        }
3233    }
3234    putVSR(xT(opcode), &xt, env);
3235}
3236
3237/* VSX_TEST_DC - VSX floating point test data class
3238 *   op    - instruction mnemonic
3239 *   nels  - number of elements (1, 2 or 4)
3240 *   xbn   - VSR register number
3241 *   tp    - type (float32 or float64)
3242 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
3243 *   tfld   - target vsr_t field (VsrD(*) or VsrW(*))
3244 *   fld_max - target field max
3245 *   scrf - set result in CR and FPCC
3246 */
/*
 * Classify each element against the DCMX mask: bit 6 selects NaN,
 * bits 5/4 +/- infinity, bits 3/2 +/- zero, bits 1/0 +/- denormal.
 * With scrf, the (sign, match) pair is written to FPSCR.FPRF and the
 * targeted CR field; otherwise each target element becomes all-ones
 * on a match and zero otherwise.
 */
#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf)  \
void helper_##op(CPUPPCState *env, uint32_t opcode)         \
{                                                           \
    ppc_vsr_t xt, xb;                                       \
    uint32_t i, sign, dcmx;                                 \
    uint32_t cc, match = 0;                                 \
                                                            \
    getVSR(xbn, &xb, env);                                  \
    if (!scrf) {                                            \
        memset(&xt, 0, sizeof(xt));                         \
        dcmx = DCMX_XV(opcode);                             \
    } else {                                                \
        dcmx = DCMX(opcode);                                \
    }                                                       \
                                                            \
    for (i = 0; i < nels; i++) {                            \
        sign = tp##_is_neg(xb.fld);                         \
        if (tp##_is_any_nan(xb.fld)) {                      \
            match = extract32(dcmx, 6, 1);                  \
        } else if (tp##_is_infinity(xb.fld)) {              \
            match = extract32(dcmx, 4 + !sign, 1);          \
        } else if (tp##_is_zero(xb.fld)) {                  \
            match = extract32(dcmx, 2 + !sign, 1);          \
        } else if (tp##_is_zero_or_denormal(xb.fld)) {      \
            match = extract32(dcmx, 0 + !sign, 1);          \
        }                                                   \
                                                            \
        if (scrf) {                                         \
            cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT;  \
            env->fpscr &= ~(0x0F << FPSCR_FPRF);            \
            env->fpscr |= cc << FPSCR_FPRF;                 \
            env->crf[BF(opcode)] = cc;                      \
        } else {                                            \
            xt.tfld = match ? fld_max : 0;                  \
        }                                                   \
        match = 0;                                          \
    }                                                       \
    if (!scrf) {                                            \
        putVSR(xT(opcode), &xt, env);                       \
    }                                                       \
}

VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0)
VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)
3293
/*
 * xststdcsp: scalar test data class for a single-precision value held
 * in DP format.  On top of the usual class checks, CR.SO is set when
 * the operand is not exactly representable in SP ("not_sp"), decided
 * by a DP -> SP -> DP round trip.
 *
 * NOTE(review): the round trip uses env->fp_status directly, so it may
 * leave exception flags set as a side effect -- TODO confirm this is
 * harmless for this instruction.
 */
void helper_xststdcsp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    uint32_t dcmx, sign, exp;
    uint32_t cc, match = 0, not_sp = 0;

    getVSR(xB(opcode), &xb, env);
    dcmx = DCMX(opcode);
    exp = (xb.VsrD(0) >> 52) & 0x7FF;   /* raw DP biased exponent */

    sign = float64_is_neg(xb.VsrD(0));
    if (float64_is_any_nan(xb.VsrD(0))) {
        match = extract32(dcmx, 6, 1);
    } else if (float64_is_infinity(xb.VsrD(0))) {
        match = extract32(dcmx, 4 + !sign, 1);
    } else if (float64_is_zero(xb.VsrD(0))) {
        match = extract32(dcmx, 2 + !sign, 1);
    } else if (float64_is_zero_or_denormal(xb.VsrD(0)) ||
               (exp > 0 && exp < 0x381)) {
        /* DP denormal, or DP-normal but below the smallest normal SP
         * magnitude (0x381 is the DP-biased exponent of SP min normal). */
        match = extract32(dcmx, 0 + !sign, 1);
    }

    not_sp = !float64_eq(xb.VsrD(0),
                         float32_to_float64(
                             float64_to_float32(xb.VsrD(0), &env->fp_status),
                             &env->fp_status), &env->fp_status);

    cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;
}
3326
/*
 * xsrqpi/xsrqpix: round quad precision to integral.  The R (Rrm) and
 * RMC fields select the rounding mode; the EX bit (Rc position)
 * distinguishes the "with inexact" form -- when clear, the inexact
 * flag raised by the rounding is suppressed.
 */
void helper_xsrqpi(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    ppc_vsr_t xt;
    uint8_t r = Rrm(opcode);       /* R: selects fixed vs. RN-based modes */
    uint8_t ex = Rc(opcode);       /* EX: keep the inexact flag if set */
    uint8_t rmc = RMC(opcode);     /* 2-bit rounding-mode control */
    uint8_t rmode = 0;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));
    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        /* Use the current FPSCR rounding mode. */
        rmode = fpscr_rn;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    /* Round on a scratch status, then fold the flags back in. */
    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    xt.f128 = float128_round_to_int(xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_vxsnan(env, GETPC());
            xt.f128 = float128_snan_to_qnan(xt.f128);
        }
    }

    if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
        env->fp_status.float_exception_flags &= ~float_flag_inexact;
    }

    helper_compute_fprf_float128(env, xt.f128);
    /* NOTE(review): the result is stored after do_float_check_status(),
     * the opposite order from the sibling QP helpers below -- confirm
     * this is intentional with respect to enabled-exception traps. */
    do_float_check_status(env, GETPC());
    putVSR(rD(opcode) + 32, &xt, env);
}
3385
/*
 * xsrqpxp: round quad precision to the precision of extended double
 * (80-bit) format, by converting through floatx80 and back.  The
 * rounding-mode selection from R/RMC is identical to helper_xsrqpi().
 */
void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    ppc_vsr_t xt;
    uint8_t r = Rrm(opcode);       /* R: selects fixed vs. RN-based modes */
    uint8_t rmc = RMC(opcode);     /* 2-bit rounding-mode control */
    uint8_t rmode = 0;
    floatx80 round_res;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));
    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        /* Use the current FPSCR rounding mode. */
        rmode = fpscr_rn;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    /* Round via floatx80 on a scratch status; fold the flags back in. */
    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    round_res = float128_to_floatx80(xb.f128, &tstat);
    xt.f128 = floatx80_to_float128(round_res, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_vxsnan(env, GETPC());
            xt.f128 = float128_snan_to_qnan(xt.f128);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    do_float_check_status(env, GETPC());
}
3441
/*
 * xssqrtqp: quad-precision square root.  The Rc bit selects the
 * round-to-odd form.  On invalid: an SNaN operand is propagated
 * quieted (note the quieting is applied to xb, the operand), a QNaN
 * operand is propagated as-is, and a negative non-zero operand raises
 * VXSQRT and yields the default NaN.
 */
void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    ppc_vsr_t xt;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));
    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_sqrt(xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_vxsnan(env, GETPC());
            xt.f128 = float128_snan_to_qnan(xb.f128);
        } else if  (float128_is_quiet_nan(xb.f128, &tstat)) {
            xt.f128 = xb.f128;
        } else if (float128_is_neg(xb.f128) && !float128_is_zero(xb.f128)) {
            float_invalid_op_vxsqrt(env, 1, GETPC());
            xt.f128 = float128_default_nan(&env->fp_status);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    do_float_check_status(env, GETPC());
}
3477
/*
 * xssubqp: quad-precision subtract (xT = xA - xB).  The Rc bit selects
 * the round-to-odd form.  Invalid operations (e.g. inf - inf, SNaN
 * operand) are reported through float_invalid_op_addsub() with the
 * combined operand classification.
 */
void helper_xssubqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);
    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_sub(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, 1, GETPC(),
                                float128_classify(xa.f128) |
                                float128_classify(xb.f128));
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    do_float_check_status(env, GETPC());
}
3507