qemu/target/ppc/fpu_helper.c
/*
 *  PowerPC floating point and SPE emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "internal.h"
#include "fpu/softfloat.h"

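/*
 * Quieting a signaling NaN only requires setting the most-significant
 * bit of the fraction (the "quiet" bit); the helpers below do exactly
 * that for each supported format.
 */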
static inline float128 float128_snan_to_qnan(float128 x)
{
    float128 r;

    r.high = x.high | 0x0000800000000000;
    r.low = x.low;
    return r;
}

#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
#define float16_snan_to_qnan(x) ((x) | 0x0200)

static inline bool fp_exceptions_enabled(CPUPPCState *env)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    return (env->msr & ((1U << MSR_FE0) | (1U << MSR_FE1))) != 0;
#endif
}

/*****************************************************************************/
/* Floating point operations helpers */

/*
 * This is the non-arithmetic conversion that happens e.g. on loads.
 * In the Power ISA pseudocode, this is called DOUBLE.
 */
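/* For example, 0x3f800000 (1.0f) widens to 0x3ff0000000000000 (1.0). */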
uint64_t helper_todouble(uint32_t arg)
{
    uint32_t abs_arg = arg & 0x7fffffff;
    uint64_t ret;

    if (likely(abs_arg >= 0x00800000)) {
        /* Normalized operand, or Inf, or NaN.  */
        ret  = (uint64_t)extract32(arg, 30, 2) << 62;
        ret |= ((extract32(arg, 30, 1) ^ 1) * (uint64_t)7) << 59;
        ret |= (uint64_t)extract32(arg, 0, 30) << 29;
    } else {
        /* Zero or Denormalized operand.  */
        ret = (uint64_t)extract32(arg, 31, 1) << 63;
        if (unlikely(abs_arg != 0)) {
            /* Denormalized operand.  */
            int shift = clz32(abs_arg) - 9;
            int exp = -126 - shift + 1023;
            ret |= (uint64_t)exp << 52;
            ret |= abs_arg << (shift + 29);
        }
    }
    return ret;
}

/*
 * This is the non-arithmetic conversion that happens e.g. on stores.
 * In the Power ISA pseudocode, this is called SINGLE.
 */
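/* For example, 0x3ff0000000000000 (1.0) narrows back to 0x3f800000 (1.0f). */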
uint32_t helper_tosingle(uint64_t arg)
{
    int exp = extract64(arg, 52, 11);
    uint32_t ret;

    if (likely(exp > 896)) {
        /* No denormalization required (includes Inf, NaN).  */
        ret  = extract64(arg, 62, 2) << 30;
        ret |= extract64(arg, 29, 30);
    } else {
        /*
         * Zero or Denormal result.  If the exponent is in bounds for
         * a single-precision denormal result, extract the proper
         * bits.  If the input is not zero, and the exponent is out of
         * bounds, then the result is undefined; this underflows to
         * zero.
         */
        ret = extract64(arg, 63, 1) << 31;
        if (unlikely(exp >= 874)) {
            /* Denormal result.  */
            ret |= ((1ULL << 52) | extract64(arg, 0, 52)) >> (896 + 30 - exp);
        }
    }
    return ret;
}

static inline int ppc_float32_get_unbiased_exp(float32 f)
{
    return ((f >> 23) & 0xFF) - 127;
}

static inline int ppc_float64_get_unbiased_exp(float64 f)
{
    return ((f >> 52) & 0x7FF) - 1023;
}

/* Classify a floating-point number.  */
enum {
    is_normal   = 1,
    is_zero     = 2,
    is_denormal = 4,
    is_inf      = 8,
    is_qnan     = 16,
    is_snan     = 32,
    is_neg      = 64,
};

#define COMPUTE_CLASS(tp)                                      \
static int tp##_classify(tp arg)                               \
{                                                              \
    int ret = tp##_is_neg(arg) * is_neg;                       \
    if (unlikely(tp##_is_any_nan(arg))) {                      \
        float_status dummy = { };  /* snan_bit_is_one = 0 */   \
        ret |= (tp##_is_signaling_nan(arg, &dummy)             \
                ? is_snan : is_qnan);                          \
    } else if (unlikely(tp##_is_infinity(arg))) {              \
        ret |= is_inf;                                         \
    } else if (tp##_is_zero(arg)) {                            \
        ret |= is_zero;                                        \
    } else if (tp##_is_zero_or_denormal(arg)) {                \
        ret |= is_denormal;                                    \
    } else {                                                   \
        ret |= is_normal;                                      \
    }                                                          \
    return ret;                                                \
}

COMPUTE_CLASS(float16)
COMPUTE_CLASS(float32)
COMPUTE_CLASS(float64)
COMPUTE_CLASS(float128)

static void set_fprf_from_class(CPUPPCState *env, int class)
{
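    /*
     * Power ISA FPRF encodings (5 bits: C, FL, FG, FE, FU), indexed
     * below by ctz32(class) and by the sign of the operand.
     */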
    static const uint8_t fprf[6][2] = {
        { 0x04, 0x08 },  /* normalized */
        { 0x02, 0x12 },  /* zero */
        { 0x14, 0x18 },  /* denormalized */
        { 0x05, 0x09 },  /* infinity */
        { 0x11, 0x11 },  /* qnan */
        { 0x00, 0x00 },  /* snan -- flags are undefined */
    };
    bool isneg = class & is_neg;

    env->fpscr &= ~(0x1F << FPSCR_FPRF);
    env->fpscr |= fprf[ctz32(class)][isneg] << FPSCR_FPRF;
}

#define COMPUTE_FPRF(tp)                                \
void helper_compute_fprf_##tp(CPUPPCState *env, tp arg) \
{                                                       \
    set_fprf_from_class(env, tp##_classify(arg));       \
}

COMPUTE_FPRF(float16)
COMPUTE_FPRF(float32)
COMPUTE_FPRF(float64)
COMPUTE_FPRF(float128)

/* Floating-point invalid operations exception */
static void finish_invalid_op_excp(CPUPPCState *env, int op, uintptr_t retaddr)
{
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | op, retaddr);
        }
    }
}

static void finish_invalid_op_arith(CPUPPCState *env, int op,
                                    bool set_fpcc, uintptr_t retaddr)
{
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    if (fpscr_ve == 0) {
        if (set_fpcc) {
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
    }
    finish_invalid_op_excp(env, op, retaddr);
}

/* Signalling NaN */
static void float_invalid_op_vxsnan(CPUPPCState *env, uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXSNAN;
    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, retaddr);
}

/* Magnitude subtraction of infinities */
static void float_invalid_op_vxisi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXISI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXISI, set_fpcc, retaddr);
}

/* Division of infinity by infinity */
static void float_invalid_op_vxidi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXIDI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIDI, set_fpcc, retaddr);
}

/* Division of zero by zero */
static void float_invalid_op_vxzdz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXZDZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXZDZ, set_fpcc, retaddr);
}

/* Multiplication of zero by infinity */
static void float_invalid_op_vximz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXIMZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIMZ, set_fpcc, retaddr);
}

/* Square root of a negative number */
static void float_invalid_op_vxsqrt(CPUPPCState *env, bool set_fpcc,
                                    uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXSQRT;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXSQRT, set_fpcc, retaddr);
}

/* Ordered comparison of NaN */
static void float_invalid_op_vxvc(CPUPPCState *env, bool set_fpcc,
                                  uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXVC;
    if (set_fpcc) {
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    /* We must update the target FPR before raising the exception */
    if (fpscr_ve != 0) {
        CPUState *cs = env_cpu(env);

        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* Exception is deferred */
    }
}

/* Invalid conversion */
static void float_invalid_op_vxcvi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXCVI;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    if (fpscr_ve == 0) {
        if (set_fpcc) {
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
    }
    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, retaddr);
}

static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
                                   raddr);
        }
    }
}

static inline void float_overflow_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

static inline void float_underflow_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

static inline void float_inexact_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= 1 << FPSCR_FI;
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}

static inline void fpscr_set_rounding_mode(CPUPPCState *env)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}

void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            if (!fpscr_ix) {
                /* Set VX bit to zero */
                env->fpscr &= ~(1 << FPSCR_VX);
            }
            break;
        case FPSCR_OX:
        case FPSCR_UX:
        case FPSCR_ZX:
        case FPSCR_XX:
        case FPSCR_VE:
        case FPSCR_OE:
        case FPSCR_UE:
        case FPSCR_ZE:
        case FPSCR_XE:
            if (!fpscr_eex) {
                /* Clear the FEX bit */
                env->fpscr &= ~(1 << FPSCR_FEX);
            }
            break;
        default:
            break;
        }
    }
}

void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
{
    CPUState *cs = env_cpu(env);
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= FP_FX;
            if (fpscr_ve) {
                goto raise_ve;
            }
            break;
        case FPSCR_OX:
            env->fpscr |= FP_FX;
            if (fpscr_oe) {
                goto raise_oe;
            }
            break;
        case FPSCR_UX:
            env->fpscr |= FP_FX;
            if (fpscr_ue) {
                goto raise_ue;
            }
            break;
        case FPSCR_ZX:
            env->fpscr |= FP_FX;
            if (fpscr_ze) {
                goto raise_ze;
            }
            break;
        case FPSCR_XX:
            env->fpscr |= FP_FX;
            if (fpscr_xe) {
                goto raise_xe;
            }
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= FP_FX;
            if (fpscr_ve != 0) {
                goto raise_ve;
            }
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan) {
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                }
                if (fpscr_vxisi) {
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                }
                if (fpscr_vxidi) {
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                }
                if (fpscr_vxzdz) {
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                }
                if (fpscr_vximz) {
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                }
                if (fpscr_vxvc) {
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                }
                if (fpscr_vxsoft) {
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                }
                if (fpscr_vxsqrt) {
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                }
                if (fpscr_vxcvi) {
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                }
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            cs->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}

void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    CPUState *cs = env_cpu(env);
    target_ulong prev, new;
    int i;

    prev = env->fpscr;
    new = (target_ulong)arg;
    new &= ~0x60000000LL;
    new |= prev & 0x60000000LL;
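    /* Each set bit in mask selects one 4-bit nibble of the FPSCR to update. */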
    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xFLL << (4 * i));
            env->fpscr |= new & (0xFLL << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0) {
        env->fpscr |= 1 << FPSCR_VX;
    } else {
        env->fpscr &= ~(1 << FPSCR_VX);
    }
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode(env);
}

void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    helper_store_fpscr(env, arg, mask);
}

static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);
    int status = get_float_exception_flags(&env->fp_status);
    bool inexact_happened = false;

    if (status & float_flag_overflow) {
        float_overflow_excp(env);
    } else if (status & float_flag_underflow) {
        float_underflow_excp(env);
    } else if (status & float_flag_inexact) {
        float_inexact_excp(env);
        inexact_happened = true;
    }

    /* if the inexact flag was not set */
    if (inexact_happened == false) {
        env->fpscr &= ~(1 << FPSCR_FI); /* clear the FPSCR[FI] bit */
    }

    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, cs->exception_index,
                                   env->error_code, raddr);
        }
    }
}

void helper_float_check_status(CPUPPCState *env)
{
    do_float_check_status(env, GETPC());
}

void helper_reset_fpstatus(CPUPPCState *env)
{
    set_float_exception_flags(0, &env->fp_status);
}

static void float_invalid_op_addsub(CPUPPCState *env, bool set_fpcc,
                                    uintptr_t retaddr, int classes)
{
    if ((classes & ~is_neg) == is_inf) {
        /* Magnitude subtraction of infinities */
        float_invalid_op_vxisi(env, set_fpcc, retaddr);
    } else if (classes & is_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}

/* fadd - fadd. */
float64 helper_fadd(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_add(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        float_invalid_op_addsub(env, 1, GETPC(),
                                float64_classify(arg1) |
                                float64_classify(arg2));
    }

    return ret;
}

/* fsub - fsub. */
float64 helper_fsub(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_sub(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        float_invalid_op_addsub(env, 1, GETPC(),
                                float64_classify(arg1) |
                                float64_classify(arg2));
    }

    return ret;
}

static void float_invalid_op_mul(CPUPPCState *env, bool set_fprc,
                                 uintptr_t retaddr, int classes)
{
    if ((classes & (is_zero | is_inf)) == (is_zero | is_inf)) {
        /* Multiplication of zero by infinity */
        float_invalid_op_vximz(env, set_fprc, retaddr);
    } else if (classes & is_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}

/* fmul - fmul. */
float64 helper_fmul(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_mul(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        float_invalid_op_mul(env, 1, GETPC(),
                             float64_classify(arg1) |
                             float64_classify(arg2));
    }

    return ret;
}

static void float_invalid_op_div(CPUPPCState *env, bool set_fprc,
                                 uintptr_t retaddr, int classes)
{
    classes &= ~is_neg;
    if (classes == is_inf) {
        /* Division of infinity by infinity */
        float_invalid_op_vxidi(env, set_fprc, retaddr);
    } else if (classes == is_zero) {
        /* Division of zero by zero */
        float_invalid_op_vxzdz(env, set_fprc, retaddr);
    } else if (classes & is_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}

/* fdiv - fdiv. */
float64 helper_fdiv(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_div(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status)) {
        if (status & float_flag_invalid) {
            float_invalid_op_div(env, 1, GETPC(),
                                 float64_classify(arg1) |
                                 float64_classify(arg2));
        }
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp(env, GETPC());
        }
    }

    return ret;
}

static void float_invalid_cvt(CPUPPCState *env, bool set_fprc,
                              uintptr_t retaddr, int class1)
{
    float_invalid_op_vxcvi(env, set_fprc, retaddr);
    if (class1 & is_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}

#define FPU_FCTI(op, cvt, nanval)                                      \
uint64_t helper_##op(CPUPPCState *env, float64 arg)                    \
{                                                                      \
    uint64_t ret = float64_to_##cvt(arg, &env->fp_status);             \
    int status = get_float_exception_flags(&env->fp_status);           \
                                                                       \
    if (unlikely(status)) {                                            \
        if (status & float_flag_invalid) {                             \
            float_invalid_cvt(env, 1, GETPC(), float64_classify(arg)); \
            ret = nanval;                                              \
        }                                                              \
        do_float_check_status(env, GETPC());                           \
    }                                                                  \
    return ret;                                                        \
}

FPU_FCTI(fctiw, int32, 0x80000000U)
FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
FPU_FCTI(fctiwu, uint32, 0x00000000U)
FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)

#define FPU_FCFI(op, cvtr, is_single)                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)       \
{                                                          \
    CPU_DoubleU farg;                                      \
                                                           \
    if (is_single) {                                       \
        float32 tmp = cvtr(arg, &env->fp_status);          \
        farg.d = float32_to_float64(tmp, &env->fp_status); \
    } else {                                               \
        farg.d = cvtr(arg, &env->fp_status);               \
    }                                                      \
    do_float_check_status(env, GETPC());                   \
    return farg.ll;                                        \
}

FPU_FCFI(fcfid, int64_to_float64, 0)
FPU_FCFI(fcfids, int64_to_float32, 1)
FPU_FCFI(fcfidu, uint64_to_float64, 0)
FPU_FCFI(fcfidus, uint64_to_float32, 1)

static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
                              int rounding_mode)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN round */
        float_invalid_op_vxsnan(env, GETPC());
        farg.ll = arg | 0x0008000000000000ULL;
    } else {
        int inexact = get_float_exception_flags(&env->fp_status) &
                      float_flag_inexact;
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode(env);

        /* fri* does not set FPSCR[XX] */
        if (!inexact) {
            env->fp_status.float_exception_flags &= ~float_flag_inexact;
        }
    }
    do_float_check_status(env, GETPC());
    return farg.ll;
}

uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_ties_away);
}

uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_to_zero);
}

uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_up);
}

uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_down);
}

#define FPU_MADDSUB_UPDATE(NAME, TP)                                    \
static void NAME(CPUPPCState *env, TP arg1, TP arg2, TP arg3,           \
                 unsigned int madd_flags, uintptr_t retaddr)            \
{                                                                       \
    if (TP##_is_signaling_nan(arg1, &env->fp_status) ||                 \
        TP##_is_signaling_nan(arg2, &env->fp_status) ||                 \
        TP##_is_signaling_nan(arg3, &env->fp_status)) {                 \
        /* sNaN operation */                                            \
        float_invalid_op_vxsnan(env, retaddr);                          \
    }                                                                   \
    if ((TP##_is_infinity(arg1) && TP##_is_zero(arg2)) ||               \
        (TP##_is_zero(arg1) && TP##_is_infinity(arg2))) {               \
        /* Multiplication of zero by infinity */                        \
        float_invalid_op_vximz(env, 1, retaddr);                        \
    }                                                                   \
    if ((TP##_is_infinity(arg1) || TP##_is_infinity(arg2)) &&           \
        TP##_is_infinity(arg3)) {                                       \
        uint8_t aSign, bSign, cSign;                                    \
                                                                        \
        aSign = TP##_is_neg(arg1);                                      \
        bSign = TP##_is_neg(arg2);                                      \
        cSign = TP##_is_neg(arg3);                                      \
        if (madd_flags & float_muladd_negate_c) {                       \
            cSign ^= 1;                                                 \
        }                                                               \
        if (aSign ^ bSign ^ cSign) {                                    \
            float_invalid_op_vxisi(env, 1, retaddr);                    \
        }                                                               \
    }                                                                   \
}
FPU_MADDSUB_UPDATE(float32_maddsub_update_excp, float32)
FPU_MADDSUB_UPDATE(float64_maddsub_update_excp, float64)

#define FPU_FMADD(op, madd_flags)                                       \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg1,                   \
                     uint64_t arg2, uint64_t arg3)                      \
{                                                                       \
    uint32_t flags;                                                     \
    float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags,          \
                                 &env->fp_status);                      \
    flags = get_float_exception_flags(&env->fp_status);                 \
    if (flags) {                                                        \
        if (flags & float_flag_invalid) {                               \
            float64_maddsub_update_excp(env, arg1, arg2, arg3,          \
                                        madd_flags, GETPC());           \
        }                                                               \
        do_float_check_status(env, GETPC());                            \
    }                                                                   \
    return ret;                                                         \
}

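/*
 * Flag combinations handed to float64_muladd below: fmsub negates the
 * addend, fnmadd/fnmsub additionally negate the final result.
 */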
#define MADD_FLGS 0
#define MSUB_FLGS float_muladd_negate_c
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)

FPU_FMADD(fmadd, MADD_FLGS)
FPU_FMADD(fnmadd, NMADD_FLGS)
FPU_FMADD(fmsub, MSUB_FLGS)
FPU_FMADD(fnmsub, NMSUB_FLGS)

/* frsp - frsp. */
uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}

/* fsqrt - fsqrt. */
float64 helper_fsqrt(CPUPPCState *env, float64 arg)
{
    float64 ret = float64_sqrt(arg, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        if (unlikely(float64_is_any_nan(arg))) {
            if (unlikely(float64_is_signaling_nan(arg, &env->fp_status))) {
                /* sNaN square root */
                float_invalid_op_vxsnan(env, GETPC());
            }
        } else {
            /* Square root of a negative nonzero number */
            float_invalid_op_vxsqrt(env, 1, GETPC());
        }
    }

    return ret;
}

/* fre - fre. */
float64 helper_fre(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal with actual division.  */
    float64 ret = float64_div(float64_one, arg, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status)) {
        if (status & float_flag_invalid) {
            if (float64_is_signaling_nan(arg, &env->fp_status)) {
                /* sNaN reciprocal */
                float_invalid_op_vxsnan(env, GETPC());
            }
        }
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp(env, GETPC());
            /* For FPSCR.ZE == 0, the result is 1/2.  */
            ret = float64_set_sign(float64_half, float64_is_neg(arg));
        }
    }

    return ret;
}

/* fres - fres. */
uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN reciprocal */
        float_invalid_op_vxsnan(env, GETPC());
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}

/* frsqrte  - frsqrte. */
float64 helper_frsqrte(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the result with an actual square root and division.  */
    float64 rets = float64_sqrt(arg, &env->fp_status);
    float64 retd = float64_div(float64_one, rets, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status)) {
        if (status & float_flag_invalid) {
            if (float64_is_signaling_nan(arg, &env->fp_status)) {
                /* sNaN reciprocal */
                float_invalid_op_vxsnan(env, GETPC());
            } else {
                /* Square root of a negative nonzero number */
                float_invalid_op_vxsqrt(env, 1, GETPC());
            }
        }
        if (status & float_flag_divbyzero) {
            /* Reciprocal of (square root of) zero.  */
            float_zero_divide_excp(env, GETPC());
        }
    }

    return retd;
}

/* fsel - fsel. */
uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                     uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
        !float64_is_any_nan(farg1.d)) {
        return arg2;
    } else {
        return arg3;
    }
}

uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(fra) ||
                 float64_is_infinity(frb) ||
                 float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_a = ppc_float64_get_unbiased_exp(fra);
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(fra) ||
                     float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if ((e_b <= -1022) || (e_b >= 1021)) {
            fe_flag = 1;
        } else if (!float64_is_zero(fra) &&
                   (((e_a - e_b) >= 1023) ||
                    ((e_a - e_b) <= -1021) ||
                    (e_a <= -970))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /*
             * XB is not zero because of the above check and
             * so must be denormalized.
             */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}

uint32_t helper_ftsqrt(uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_zero(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_neg(frb))) {
            fe_flag = 1;
        } else if (!float64_is_zero(frb) && (e_b <= (-1022 + 52))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /*
             * XB is not zero because of the above check and
             * therefore must be denormalized.
             */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}

void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

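    /* FPCC encoding: 0x08 = FL (<), 0x04 = FG (>), 0x02 = FE (=), 0x01 = FU. */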
    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
        /* sNaN comparison */
        float_invalid_op_vxsnan(env, GETPC());
    }
}

void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL)) {
        float_invalid_op_vxvc(env, 1, GETPC());
        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
            /* sNaN comparison */
            float_invalid_op_vxsnan(env, GETPC());
        }
    }
}

/* Single-precision floating-point conversions */
static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32(u.f, &env->vec_status);
}

static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}

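/*
 * The fractional conversions below treat the 32-bit operand as a
 * fixed-point fraction, i.e. the integer value scaled by 2^32, hence
 * the division (or multiplication) by 1ULL << 32.
 */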
static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}

#define HELPER_SPE_SINGLE_CONV(name)                              \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t val)       \
    {                                                             \
        return e##name(env, val);                                 \
    }
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);

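/*
 * The SPE vector forms apply the scalar conversion independently to the
 * upper and lower 32-bit halves of the 64-bit operand.
 */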
#define HELPER_SPE_VECTOR_CONV(name)                            \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val)    \
    {                                                           \
        return ((uint64_t)e##name(env, val >> 32) << 32) |      \
            (uint64_t)e##name(env, val);                        \
    }
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);

/* Single-precision floating-point arithmetic */
static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

#define HELPER_SPE_SINGLE_ARITH(name)                                   \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                   \
        return e##name(env, op1, op2);                                  \
    }
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);

#define HELPER_SPE_VECTOR_ARITH(name)                                   \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                   \
        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |   \
            (uint64_t)e##name(env, op1, op2);                           \
    }
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);

/* Single-precision floating-point comparisons */
static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmplt(env, op1, op2);
}

static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpgt(env, op1, op2);
}

static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpeq(env, op1, op2);
}

#define HELPER_SINGLE_SPE_CMP(name)                                     \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                   \
        return e##name(env, op1, op2);                                  \
    }
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);

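/*
 * Merge the comparison results of the two 32-bit halves into one CR
 * field: bit 3 = upper, bit 2 = lower, bit 1 = either, bit 0 = both.
 */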
static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}

#define HELPER_VECTOR_SPE_CMP(name)                                     \
    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                   \
        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),          \
                           e##name(env, op1, op2));                     \
    }
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);

/* Double-precision floating-point conversion */
uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}

1626uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
1627{
1628    CPU_DoubleU u;
1629    float64 tmp;
1630
1631    u.d = int32_to_float64(val, &env->vec_status);
1632    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
1633    u.d = float64_div(u.d, tmp, &env->vec_status);
1634
1635    return u.ll;
1636}
1637
1638uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
1639{
1640    CPU_DoubleU u;
1641    float64 tmp;
1642
1643    u.d = uint32_to_float64(val, &env->vec_status);
1644    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
1645    u.d = float64_div(u.d, tmp, &env->vec_status);
1646
1647    return u.ll;
1648}
1649
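/*
 * The reverse fractional conversions: scale the double-precision operand up
 * by 2^32 and convert it to a 32-bit integer, with NaNs forced to 0 as above.
 */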
1650uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
1651{
1652    CPU_DoubleU u;
1653    float64 tmp;
1654
1655    u.ll = val;
1656    /* NaNs are not handled the way IEEE 754 specifies */
1657    if (unlikely(float64_is_any_nan(u.d))) {
1658        return 0;
1659    }
1660    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
1661    u.d = float64_mul(u.d, tmp, &env->vec_status);
1662
1663    return float64_to_int32(u.d, &env->vec_status);
1664}
1665
1666uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
1667{
1668    CPU_DoubleU u;
1669    float64 tmp;
1670
1671    u.ll = val;
1672    /* NaNs are not handled the way IEEE 754 specifies */
1673    if (unlikely(float64_is_any_nan(u.d))) {
1674        return 0;
1675    }
1676    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
1677    u.d = float64_mul(u.d, tmp, &env->vec_status);
1678
1679    return float64_to_uint32(u.d, &env->vec_status);
1680}
1681
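/*
 * Single <-> double conversions: efscfd narrows a double to single precision
 * and efdcfs widens a single to double, both using the SPE vector status for
 * rounding and exception accumulation.
 */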
1682uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
1683{
1684    CPU_DoubleU u1;
1685    CPU_FloatU u2;
1686
1687    u1.ll = val;
1688    u2.f = float64_to_float32(u1.d, &env->vec_status);
1689
1690    return u2.l;
1691}
1692
1693uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
1694{
1695    CPU_DoubleU u2;
1696    CPU_FloatU u1;
1697
1698    u1.l = val;
1699    u2.d = float32_to_float64(u1.f, &env->vec_status);
1700
1701    return u2.ll;
1702}
1703
1704/* Double-precision floating-point arithmetic */
1705uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
1706{
1707    CPU_DoubleU u1, u2;
1708
1709    u1.ll = op1;
1710    u2.ll = op2;
1711    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
1712    return u1.ll;
1713}
1714
1715uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
1716{
1717    CPU_DoubleU u1, u2;
1718
1719    u1.ll = op1;
1720    u2.ll = op2;
1721    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
1722    return u1.ll;
1723}
1724
1725uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
1726{
1727    CPU_DoubleU u1, u2;
1728
1729    u1.ll = op1;
1730    u2.ll = op2;
1731    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
1732    return u1.ll;
1733}
1734
1735uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
1736{
1737    CPU_DoubleU u1, u2;
1738
1739    u1.ll = op1;
1740    u2.ll = op2;
1741    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
1742    return u1.ll;
1743}
1744
1745/* Double-precision floating-point comparison helpers */
1746uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1747{
1748    CPU_DoubleU u1, u2;
1749
1750    u1.ll = op1;
1751    u2.ll = op2;
1752    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
1753}
1754
1755uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1756{
1757    CPU_DoubleU u1, u2;
1758
1759    u1.ll = op1;
1760    u2.ll = op2;
1761    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
1762}
1763
1764uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
1765{
1766    CPU_DoubleU u1, u2;
1767
1768    u1.ll = op1;
1769    u2.ll = op2;
1770    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
1771}
1772
1773uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1774{
1775    /* XXX: TODO: test special values (NaN, infinities, ...) */
1776    return helper_efdtstlt(env, op1, op2);
1777}
1778
1779uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1780{
1781    /* XXX: TODO: test special values (NaN, infinities, ...) */
1782    return helper_efdtstgt(env, op1, op2);
1783}
1784
1785uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
1786{
1787    /* XXX: TODO: test special values (NaN, infinities, ...) */
1788    return helper_efdtsteq(env, op1, op2);
1789}
1790
1791#define float64_to_float64(x, env) x
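/*
 * Identity "conversion": presumably here so that type-generic macros which
 * expand a tp##_to_float64 step also compile when tp is already float64; it
 * simply returns its argument unchanged.
 */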
1792
1793
1794/*
1795 * VSX_ADD_SUB - VSX floating point add/subtract
1796 *   name  - instruction mnemonic
1797 *   op    - operation (add or sub)
1798 *   nels  - number of elements (1, 2 or 4)
1799 *   tp    - type (float32 or float64)
1800 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1801 *   sfprf - set FPRF
1802 */
1803#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp)                    \
1804void helper_##name(CPUPPCState *env, ppc_vsr_t *xt,                          \
1805                   ppc_vsr_t *xa, ppc_vsr_t *xb)                             \
1806{                                                                            \
1807    ppc_vsr_t t = *xt;                                                       \
1808    int i;                                                                   \
1809                                                                             \
1810    helper_reset_fpstatus(env);                                              \
1811                                                                             \
1812    for (i = 0; i < nels; i++) {                                             \
1813        float_status tstat = env->fp_status;                                 \
1814        set_float_exception_flags(0, &tstat);                                \
1815        t.fld = tp##_##op(xa->fld, xb->fld, &tstat);                         \
1816        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
1817                                                                             \
1818        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
1819            float_invalid_op_addsub(env, sfprf, GETPC(),                     \
1820                                    tp##_classify(xa->fld) |                 \
1821                                    tp##_classify(xb->fld));                 \
1822        }                                                                    \
1823                                                                             \
1824        if (r2sp) {                                                          \
1825            t.fld = helper_frsp(env, t.fld);                                 \
1826        }                                                                    \
1827                                                                             \
1828        if (sfprf) {                                                         \
1829            helper_compute_fprf_float64(env, t.fld);                         \
1830        }                                                                    \
1831    }                                                                        \
1832    *xt = t;                                                                 \
1833    do_float_check_status(env, GETPC());                                     \
1834}
1835
1836VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
1837VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
1838VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
1839VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
1840VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
1841VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
1842VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
1843VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
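/*
 * Reading the table above: for example, VSX_ADD_SUB(xsadddp, add, 1, float64,
 * VsrD(0), 1, 0) generates helper_xsadddp(), a scalar float64_add on VsrD(0)
 * that sets FPRF (sfprf = 1) and skips the round-to-single step (r2sp = 0),
 * while the *sp scalar forms pass r2sp = 1 so the double-format result is
 * rounded to single precision via helper_frsp().
 */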
1844
1845void helper_xsaddqp(CPUPPCState *env, uint32_t opcode,
1846                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
1847{
1848    ppc_vsr_t t = *xt;
1849    float_status tstat;
1850
1851    helper_reset_fpstatus(env);
1852
1853    tstat = env->fp_status;
1854    if (unlikely(Rc(opcode) != 0)) {
1855        tstat.float_rounding_mode = float_round_to_odd;
1856    }
1857
1858    set_float_exception_flags(0, &tstat);
1859    t.f128 = float128_add(xa->f128, xb->f128, &tstat);
1860    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
1861
1862    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
1863        float_invalid_op_addsub(env, 1, GETPC(),
1864                                float128_classify(xa->f128) |
1865                                float128_classify(xb->f128));
1866    }
1867
1868    helper_compute_fprf_float128(env, t.f128);
1869
1870    *xt = t;
1871    do_float_check_status(env, GETPC());
1872}
1873
1874/*
1875 * VSX_MUL - VSX floating point multiply
1876 *   op    - instruction mnemonic
1877 *   nels  - number of elements (1, 2 or 4)
1878 *   tp    - type (float32 or float64)
1879 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1880 *   sfprf - set FPRF
1881 */
1882#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp)                              \
1883void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                            \
1884                 ppc_vsr_t *xa, ppc_vsr_t *xb)                               \
1885{                                                                            \
1886    ppc_vsr_t t = *xt;                                                       \
1887    int i;                                                                   \
1888                                                                             \
1889    helper_reset_fpstatus(env);                                              \
1890                                                                             \
1891    for (i = 0; i < nels; i++) {                                             \
1892        float_status tstat = env->fp_status;                                 \
1893        set_float_exception_flags(0, &tstat);                                \
1894        t.fld = tp##_mul(xa->fld, xb->fld, &tstat);                          \
1895        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
1896                                                                             \
1897        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
1898            float_invalid_op_mul(env, sfprf, GETPC(),                        \
1899                                 tp##_classify(xa->fld) |                    \
1900                                 tp##_classify(xb->fld));                    \
1901        }                                                                    \
1902                                                                             \
1903        if (r2sp) {                                                          \
1904            t.fld = helper_frsp(env, t.fld);                                 \
1905        }                                                                    \
1906                                                                             \
1907        if (sfprf) {                                                         \
1908            helper_compute_fprf_float64(env, t.fld);                         \
1909        }                                                                    \
1910    }                                                                        \
1911                                                                             \
1912    *xt = t;                                                                 \
1913    do_float_check_status(env, GETPC());                                     \
1914}
1915
1916VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
1917VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
1918VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
1919VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
1920
1921void helper_xsmulqp(CPUPPCState *env, uint32_t opcode,
1922                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
1923{
1924    ppc_vsr_t t = *xt;
1925    float_status tstat;
1926
1927    helper_reset_fpstatus(env);
1928    tstat = env->fp_status;
1929    if (unlikely(Rc(opcode) != 0)) {
1930        tstat.float_rounding_mode = float_round_to_odd;
1931    }
1932
1933    set_float_exception_flags(0, &tstat);
1934    t.f128 = float128_mul(xa->f128, xb->f128, &tstat);
1935    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
1936
1937    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
1938        float_invalid_op_mul(env, 1, GETPC(),
1939                             float128_classify(xa->f128) |
1940                             float128_classify(xb->f128));
1941    }
1942    helper_compute_fprf_float128(env, t.f128);
1943
1944    *xt = t;
1945    do_float_check_status(env, GETPC());
1946}
1947
1948/*
1949 * VSX_DIV - VSX floating point divide
1950 *   op    - instruction mnemonic
1951 *   nels  - number of elements (1, 2 or 4)
1952 *   tp    - type (float32 or float64)
1953 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1954 *   sfprf - set FPRF
1955 */
1956#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp)                               \
1957void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                             \
1958                 ppc_vsr_t *xa, ppc_vsr_t *xb)                                \
1959{                                                                             \
1960    ppc_vsr_t t = *xt;                                                        \
1961    int i;                                                                    \
1962                                                                              \
1963    helper_reset_fpstatus(env);                                               \
1964                                                                              \
1965    for (i = 0; i < nels; i++) {                                              \
1966        float_status tstat = env->fp_status;                                  \
1967        set_float_exception_flags(0, &tstat);                                 \
1968        t.fld = tp##_div(xa->fld, xb->fld, &tstat);                           \
1969        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
1970                                                                              \
1971        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
1972            float_invalid_op_div(env, sfprf, GETPC(),                         \
1973                                 tp##_classify(xa->fld) |                     \
1974                                 tp##_classify(xb->fld));                     \
1975        }                                                                     \
1976        if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {   \
1977            float_zero_divide_excp(env, GETPC());                             \
1978        }                                                                     \
1979                                                                              \
1980        if (r2sp) {                                                           \
1981            t.fld = helper_frsp(env, t.fld);                                  \
1982        }                                                                     \
1983                                                                              \
1984        if (sfprf) {                                                          \
1985            helper_compute_fprf_float64(env, t.fld);                          \
1986        }                                                                     \
1987    }                                                                         \
1988                                                                              \
1989    *xt = t;                                                                  \
1990    do_float_check_status(env, GETPC());                                      \
1991}
1992
1993VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
1994VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
1995VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
1996VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
1997
1998void helper_xsdivqp(CPUPPCState *env, uint32_t opcode,
1999                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
2000{
2001    ppc_vsr_t t = *xt;
2002    float_status tstat;
2003
2004    helper_reset_fpstatus(env);
2005    tstat = env->fp_status;
2006    if (unlikely(Rc(opcode) != 0)) {
2007        tstat.float_rounding_mode = float_round_to_odd;
2008    }
2009
2010    set_float_exception_flags(0, &tstat);
2011    t.f128 = float128_div(xa->f128, xb->f128, &tstat);
2012    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
2013
2014    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
2015        float_invalid_op_div(env, 1, GETPC(),
2016                             float128_classify(xa->f128) |
2017                             float128_classify(xb->f128));
2018    }
2019    if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {
2020        float_zero_divide_excp(env, GETPC());
2021    }
2022
2023    helper_compute_fprf_float128(env, t.f128);
2024    *xt = t;
2025    do_float_check_status(env, GETPC());
2026}
2027
2028/*
2029 * VSX_RE  - VSX floating point reciprocal estimate
2030 *   op    - instruction mnemonic
2031 *   nels  - number of elements (1, 2 or 4)
2032 *   tp    - type (float32 or float64)
2033 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2034 *   sfprf - set FPRF
2035 */
2036#define VSX_RE(op, nels, tp, fld, sfprf, r2sp)                                \
2037void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)              \
2038{                                                                             \
2039    ppc_vsr_t t = *xt;                                                        \
2040    int i;                                                                    \
2041                                                                              \
2042    helper_reset_fpstatus(env);                                               \
2043                                                                              \
2044    for (i = 0; i < nels; i++) {                                              \
2045        if (unlikely(tp##_is_signaling_nan(xb->fld, &env->fp_status))) {      \
2046            float_invalid_op_vxsnan(env, GETPC());                            \
2047        }                                                                     \
2048        t.fld = tp##_div(tp##_one, xb->fld, &env->fp_status);                 \
2049                                                                              \
2050        if (r2sp) {                                                           \
2051            t.fld = helper_frsp(env, t.fld);                                  \
2052        }                                                                     \
2053                                                                              \
2054        if (sfprf) {                                                          \
2055            helper_compute_fprf_float64(env, t.fld);                          \
2056        }                                                                     \
2057    }                                                                         \
2058                                                                              \
2059    *xt = t;                                                                  \
2060    do_float_check_status(env, GETPC());                                      \
2061}
2062
2063VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
2064VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
2065VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
2066VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
2067
2068/*
2069 * VSX_SQRT - VSX floating point square root
2070 *   op    - instruction mnemonic
2071 *   nels  - number of elements (1, 2 or 4)
2072 *   tp    - type (float32 or float64)
2073 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2074 *   sfprf - set FPRF
2075 */
2076#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp)                             \
2077void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
2078{                                                                            \
2079    ppc_vsr_t t = *xt;                                                       \
2080    int i;                                                                   \
2081                                                                             \
2082    helper_reset_fpstatus(env);                                              \
2083                                                                             \
2084    for (i = 0; i < nels; i++) {                                             \
2085        float_status tstat = env->fp_status;                                 \
2086        set_float_exception_flags(0, &tstat);                                \
2087        t.fld = tp##_sqrt(xb->fld, &tstat);                                  \
2088        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
2089                                                                             \
2090        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
2091            if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) {            \
2092                float_invalid_op_vxsqrt(env, sfprf, GETPC());                \
2093            } else if (tp##_is_signaling_nan(xb->fld, &tstat)) {             \
2094                float_invalid_op_vxsnan(env, GETPC());                       \
2095            }                                                                \
2096        }                                                                    \
2097                                                                             \
2098        if (r2sp) {                                                          \
2099            t.fld = helper_frsp(env, t.fld);                                 \
2100        }                                                                    \
2101                                                                             \
2102        if (sfprf) {                                                         \
2103            helper_compute_fprf_float64(env, t.fld);                         \
2104        }                                                                    \
2105    }                                                                        \
2106                                                                             \
2107    *xt = t;                                                                 \
2108    do_float_check_status(env, GETPC());                                     \
2109}
2110
2111VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
2112VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
2113VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
2114VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
2115
2116/*
2117 * VSX_RSQRTE - VSX floating point reciprocal square root estimate
2118 *   op    - instruction mnemonic
2119 *   nels  - number of elements (1, 2 or 4)
2120 *   tp    - type (float32 or float64)
2121 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2122 *   sfprf - set FPRF
2123 */
2124#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp)                           \
2125void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
2126{                                                                            \
2127    ppc_vsr_t t = *xt;                                                       \
2128    int i;                                                                   \
2129                                                                             \
2130    helper_reset_fpstatus(env);                                              \
2131                                                                             \
2132    for (i = 0; i < nels; i++) {                                             \
2133        float_status tstat = env->fp_status;                                 \
2134        set_float_exception_flags(0, &tstat);                                \
2135        t.fld = tp##_sqrt(xb->fld, &tstat);                                  \
2136        t.fld = tp##_div(tp##_one, t.fld, &tstat);                           \
2137        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
2138                                                                             \
2139        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
2140            if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) {            \
2141                float_invalid_op_vxsqrt(env, sfprf, GETPC());                \
2142            } else if (tp##_is_signaling_nan(xb->fld, &tstat)) {             \
2143                float_invalid_op_vxsnan(env, GETPC());                       \
2144            }                                                                \
2145        }                                                                    \
2146                                                                             \
2147        if (r2sp) {                                                          \
2148            t.fld = helper_frsp(env, t.fld);                                 \
2149        }                                                                    \
2150                                                                             \
2151        if (sfprf) {                                                         \
2152            helper_compute_fprf_float64(env, t.fld);                         \
2153        }                                                                    \
2154    }                                                                        \
2155                                                                             \
2156    *xt = t;                                                                 \
2157    do_float_check_status(env, GETPC());                                     \
2158}
2159
2160VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
2161VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
2162VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
2163VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
2164
2165/*
2166 * VSX_TDIV - VSX floating point test for divide
2167 *   op    - instruction mnemonic
2168 *   nels  - number of elements (1, 2 or 4)
2169 *   tp    - type (float32 or float64)
2170 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2171 *   emin  - minimum unbiased exponent
2172 *   emax  - maximum unbiased exponent
2173 *   nbits - number of fraction bits
2174 */
2175#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                  \
2176void helper_##op(CPUPPCState *env, uint32_t opcode,                     \
2177                 ppc_vsr_t *xa, ppc_vsr_t *xb)                          \
2178{                                                                       \
2179    int i;                                                              \
2180    int fe_flag = 0;                                                    \
2181    int fg_flag = 0;                                                    \
2182                                                                        \
2183    for (i = 0; i < nels; i++) {                                        \
2184        if (unlikely(tp##_is_infinity(xa->fld) ||                       \
2185                     tp##_is_infinity(xb->fld) ||                       \
2186                     tp##_is_zero(xb->fld))) {                          \
2187            fe_flag = 1;                                                \
2188            fg_flag = 1;                                                \
2189        } else {                                                        \
2190            int e_a = ppc_##tp##_get_unbiased_exp(xa->fld);             \
2191            int e_b = ppc_##tp##_get_unbiased_exp(xb->fld);             \
2192                                                                        \
2193            if (unlikely(tp##_is_any_nan(xa->fld) ||                    \
2194                         tp##_is_any_nan(xb->fld))) {                   \
2195                fe_flag = 1;                                            \
2196            } else if ((e_b <= emin) || (e_b >= (emax - 2))) {          \
2197                fe_flag = 1;                                            \
2198            } else if (!tp##_is_zero(xa->fld) &&                        \
2199                       (((e_a - e_b) >= emax) ||                        \
2200                        ((e_a - e_b) <= (emin + 1)) ||                  \
2201                        (e_a <= (emin + nbits)))) {                     \
2202                fe_flag = 1;                                            \
2203            }                                                           \
2204                                                                        \
2205            if (unlikely(tp##_is_zero_or_denormal(xb->fld))) {          \
2206                /*                                                      \
2207                 * XB is not zero because of the above check and so     \
2208                 * must be denormalized.                                \
2209                 */                                                     \
2210                fg_flag = 1;                                            \
2211            }                                                           \
2212        }                                                               \
2213    }                                                                   \
2214                                                                        \
2215    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
2216}
2217
2218VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
2219VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
2220VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
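/*
 * For both the test-for-divide and test-for-square-root helpers the CR field
 * is encoded as 0b1000 | (fg_flag << 2) | (fe_flag << 1): roughly, fe_flag is
 * set when the operands would need software assistance (NaNs, infinities, a
 * zero divisor, or exponents out of range), and fg_flag when the divisor or
 * square-root operand is an infinity, zero or denormal.
 */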
2221
2222/*
2223 * VSX_TSQRT - VSX floating point test for square root
2224 *   op    - instruction mnemonic
2225 *   nels  - number of elements (1, 2 or 4)
2226 *   tp    - type (float32 or float64)
2227 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2228 *   emin  - minimum unbiased exponent
2230 *   nbits - number of fraction bits
2231 */
2232#define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                       \
2233void helper_##op(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb)      \
2234{                                                                       \
2235    int i;                                                              \
2236    int fe_flag = 0;                                                    \
2237    int fg_flag = 0;                                                    \
2238                                                                        \
2239    for (i = 0; i < nels; i++) {                                        \
2240        if (unlikely(tp##_is_infinity(xb->fld) ||                       \
2241                     tp##_is_zero(xb->fld))) {                          \
2242            fe_flag = 1;                                                \
2243            fg_flag = 1;                                                \
2244        } else {                                                        \
2245            int e_b = ppc_##tp##_get_unbiased_exp(xb->fld);             \
2246                                                                        \
2247            if (unlikely(tp##_is_any_nan(xb->fld))) {                   \
2248                fe_flag = 1;                                            \
2249            } else if (unlikely(tp##_is_zero(xb->fld))) {               \
2250                fe_flag = 1;                                            \
2251            } else if (unlikely(tp##_is_neg(xb->fld))) {                \
2252                fe_flag = 1;                                            \
2253            } else if (!tp##_is_zero(xb->fld) &&                        \
2254                       (e_b <= (emin + nbits))) {                       \
2255                fe_flag = 1;                                            \
2256            }                                                           \
2257                                                                        \
2258            if (unlikely(tp##_is_zero_or_denormal(xb->fld))) {          \
2259                /*                                                      \
2260                 * XB is not zero because of the above check and        \
2261                 * therefore must be denormalized.                      \
2262                 */                                                     \
2263                fg_flag = 1;                                            \
2264            }                                                           \
2265        }                                                               \
2266    }                                                                   \
2267                                                                        \
2268    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
2269}
2270
2271VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
2272VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
2273VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
2274
2275/*
2276 * VSX_MADD - VSX floating point multiply/add variations
2277 *   op    - instruction mnemonic
2278 *   nels  - number of elements (1, 2 or 4)
2279 *   tp    - type (float32 or float64)
2280 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2281 *   maddflgs - flags for the float*muladd routine that control the
2282 *           various forms (madd, msub, nmadd, nmsub)
2283 *   sfprf - set FPRF
2284 */
2285#define VSX_MADD(op, nels, tp, fld, maddflgs, sfprf, r2sp)                    \
2286void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                             \
2287                 ppc_vsr_t *xa, ppc_vsr_t *b, ppc_vsr_t *c)                   \
2288{                                                                             \
2289    ppc_vsr_t t = *xt;                                                        \
2290    int i;                                                                    \
2291                                                                              \
2292    helper_reset_fpstatus(env);                                               \
2293                                                                              \
2294    for (i = 0; i < nels; i++) {                                              \
2295        float_status tstat = env->fp_status;                                  \
2296        set_float_exception_flags(0, &tstat);                                 \
2297        if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
2298            /*                                                                \
2299             * Avoid double rounding errors by rounding the intermediate      \
2300             * result to odd.                                                 \
2301             */                                                               \
2302            set_float_rounding_mode(float_round_to_zero, &tstat);             \
2303            t.fld = tp##_muladd(xa->fld, b->fld, c->fld,                      \
2304                                maddflgs, &tstat);                            \
2305            t.fld |= (get_float_exception_flags(&tstat) &                     \
2306                      float_flag_inexact) != 0;                               \
2307        } else {                                                              \
2308            t.fld = tp##_muladd(xa->fld, b->fld, c->fld,                      \
2309                                maddflgs, &tstat);                            \
2310        }                                                                     \
2311        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
2312                                                                              \
2313        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
2314            tp##_maddsub_update_excp(env, xa->fld, b->fld,                    \
2315                                     c->fld, maddflgs, GETPC());              \
2316        }                                                                     \
2317                                                                              \
2318        if (r2sp) {                                                           \
2319            t.fld = helper_frsp(env, t.fld);                                  \
2320        }                                                                     \
2321                                                                              \
2322        if (sfprf) {                                                          \
2323            helper_compute_fprf_float64(env, t.fld);                          \
2324        }                                                                     \
2325    }                                                                         \
2326    *xt = t;                                                                  \
2327    do_float_check_status(env, GETPC());                                      \
2328}
2329
2330VSX_MADD(xsmadddp, 1, float64, VsrD(0), MADD_FLGS, 1, 0)
2331VSX_MADD(xsmsubdp, 1, float64, VsrD(0), MSUB_FLGS, 1, 0)
2332VSX_MADD(xsnmadddp, 1, float64, VsrD(0), NMADD_FLGS, 1, 0)
2333VSX_MADD(xsnmsubdp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 0)
2334VSX_MADD(xsmaddsp, 1, float64, VsrD(0), MADD_FLGS, 1, 1)
2335VSX_MADD(xsmsubsp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1)
2336VSX_MADD(xsnmaddsp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1)
2337VSX_MADD(xsnmsubsp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1)
2338
2339VSX_MADD(xvmadddp, 2, float64, VsrD(i), MADD_FLGS, 0, 0)
2340VSX_MADD(xvmsubdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0)
2341VSX_MADD(xvnmadddp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0)
2342VSX_MADD(xvnmsubdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0)
2343
2344VSX_MADD(xvmaddsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0)
2345VSX_MADD(xvmsubsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0)
2346VSX_MADD(xvnmaddsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0)
2347VSX_MADD(xvnmsubsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0)
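/*
 * For the single-precision scalar forms (r2sp = 1), when the current rounding
 * mode is round-to-nearest-even, the fused intermediate is computed with
 * round-to-zero and the inexact flag is ORed into the low bit of the result,
 * i.e. round-to-odd, so that the final helper_frsp() rounding to single
 * precision cannot introduce a double-rounding error.
 */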
2348
2349/*
2350 * VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
2351 *   op    - instruction mnemonic
2352 *   cmp   - comparison operation
2353 *   exp   - expected result of comparison
2354 *   svxvc - set VXVC bit
2355 */
2356#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc)                                \
2357void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                             \
2358                 ppc_vsr_t *xa, ppc_vsr_t *xb)                                \
2359{                                                                             \
2360    ppc_vsr_t t = *xt;                                                        \
2361    bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false;            \
2362                                                                              \
2363    if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||             \
2364        float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {             \
2365        vxsnan_flag = true;                                                   \
2366        if (fpscr_ve == 0 && svxvc) {                                         \
2367            vxvc_flag = true;                                                 \
2368        }                                                                     \
2369    } else if (svxvc) {                                                       \
2370        vxvc_flag = float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) ||     \
2371            float64_is_quiet_nan(xb->VsrD(0), &env->fp_status);               \
2372    }                                                                         \
2373    if (vxsnan_flag) {                                                        \
2374        float_invalid_op_vxsnan(env, GETPC());                                \
2375    }                                                                         \
2376    if (vxvc_flag) {                                                          \
2377        float_invalid_op_vxvc(env, 0, GETPC());                               \
2378    }                                                                         \
2379    vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag);                        \
2380                                                                              \
2381    if (!vex_flag) {                                                          \
2382        if (float64_##cmp(xb->VsrD(0), xa->VsrD(0),                           \
2383                          &env->fp_status) == exp) {                          \
2384            t.VsrD(0) = -1;                                                   \
2385            t.VsrD(1) = 0;                                                    \
2386        } else {                                                              \
2387            t.VsrD(0) = 0;                                                    \
2388            t.VsrD(1) = 0;                                                    \
2389        }                                                                     \
2390    }                                                                         \
2391    *xt = t;                                                                  \
2392    do_float_check_status(env, GETPC());                                      \
2393}
2394
2395VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
2396VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
2397VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
2398VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)
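/*
 * Note that the macro body compares xb against xa, so xscmpgedp/xscmpgtdp can
 * reuse float64_le/float64_lt (xa >= xb iff xb <= xa), and xscmpnedp reuses
 * the eq comparison with an expected result of 0.
 */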
2399
2400void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode,
2401                       ppc_vsr_t *xa, ppc_vsr_t *xb)
2402{
2403    int64_t exp_a, exp_b;
2404    uint32_t cc;
2405
2406    exp_a = extract64(xa->VsrD(0), 52, 11);
2407    exp_b = extract64(xb->VsrD(0), 52, 11);
2408
2409    if (unlikely(float64_is_any_nan(xa->VsrD(0)) ||
2410                 float64_is_any_nan(xb->VsrD(0)))) {
2411        cc = CRF_SO;
2412    } else {
2413        if (exp_a < exp_b) {
2414            cc = CRF_LT;
2415        } else if (exp_a > exp_b) {
2416            cc = CRF_GT;
2417        } else {
2418            cc = CRF_EQ;
2419        }
2420    }
2421
2422    env->fpscr &= ~(0x0F << FPSCR_FPRF);
2423    env->fpscr |= cc << FPSCR_FPRF;
2424    env->crf[BF(opcode)] = cc;
2425
2426    do_float_check_status(env, GETPC());
2427}
2428
2429void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode,
2430                       ppc_vsr_t *xa, ppc_vsr_t *xb)
2431{
2432    int64_t exp_a, exp_b;
2433    uint32_t cc;
2434
2435    exp_a = extract64(xa->VsrD(0), 48, 15);
2436    exp_b = extract64(xb->VsrD(0), 48, 15);
2437
2438    if (unlikely(float128_is_any_nan(xa->f128) ||
2439                 float128_is_any_nan(xb->f128))) {
2440        cc = CRF_SO;
2441    } else {
2442        if (exp_a < exp_b) {
2443            cc = CRF_LT;
2444        } else if (exp_a > exp_b) {
2445            cc = CRF_GT;
2446        } else {
2447            cc = CRF_EQ;
2448        }
2449    }
2450
2451    env->fpscr &= ~(0x0F << FPSCR_FPRF);
2452    env->fpscr |= cc << FPSCR_FPRF;
2453    env->crf[BF(opcode)] = cc;
2454
2455    do_float_check_status(env, GETPC());
2456}
2457
2458#define VSX_SCALAR_CMP(op, ordered)                                      \
2459void helper_##op(CPUPPCState *env, uint32_t opcode,                      \
2460                 ppc_vsr_t *xa, ppc_vsr_t *xb)                           \
2461{                                                                        \
2462    uint32_t cc = 0;                                                     \
2463    bool vxsnan_flag = false, vxvc_flag = false;                         \
2464                                                                         \
2465    helper_reset_fpstatus(env);                                          \
2466                                                                         \
2467    if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||        \
2468        float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {        \
2469        vxsnan_flag = true;                                              \
2470        cc = CRF_SO;                                                     \
2471        if (fpscr_ve == 0 && ordered) {                                  \
2472            vxvc_flag = true;                                            \
2473        }                                                                \
2474    } else if (float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) ||     \
2475               float64_is_quiet_nan(xb->VsrD(0), &env->fp_status)) {     \
2476        cc = CRF_SO;                                                     \
2477        if (ordered) {                                                   \
2478            vxvc_flag = true;                                            \
2479        }                                                                \
2480    }                                                                    \
2481    if (vxsnan_flag) {                                                   \
2482        float_invalid_op_vxsnan(env, GETPC());                           \
2483    }                                                                    \
2484    if (vxvc_flag) {                                                     \
2485        float_invalid_op_vxvc(env, 0, GETPC());                          \
2486    }                                                                    \
2487                                                                         \
2488    if (float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) {         \
2489        cc |= CRF_LT;                                                    \
2490    } else if (!float64_le(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) { \
2491        cc |= CRF_GT;                                                    \
2492    } else {                                                             \
2493        cc |= CRF_EQ;                                                    \
2494    }                                                                    \
2495                                                                         \
2496    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                 \
2497    env->fpscr |= cc << FPSCR_FPRF;                                      \
2498    env->crf[BF(opcode)] = cc;                                           \
2499                                                                         \
2500    do_float_check_status(env, GETPC());                                 \
2501}
2502
2503VSX_SCALAR_CMP(xscmpodp, 1)
2504VSX_SCALAR_CMP(xscmpudp, 0)
2505
2506#define VSX_SCALAR_CMPQ(op, ordered)                                    \
2507void helper_##op(CPUPPCState *env, uint32_t opcode,                     \
2508                 ppc_vsr_t *xa, ppc_vsr_t *xb)                          \
2509{                                                                       \
2510    uint32_t cc = 0;                                                    \
2511    bool vxsnan_flag = false, vxvc_flag = false;                        \
2512                                                                        \
2513    helper_reset_fpstatus(env);                                         \
2514                                                                        \
2515    if (float128_is_signaling_nan(xa->f128, &env->fp_status) ||         \
2516        float128_is_signaling_nan(xb->f128, &env->fp_status)) {         \
2517        vxsnan_flag = true;                                             \
2518        cc = CRF_SO;                                                    \
2519        if (fpscr_ve == 0 && ordered) {                                 \
2520            vxvc_flag = true;                                           \
2521        }                                                               \
2522    } else if (float128_is_quiet_nan(xa->f128, &env->fp_status) ||      \
2523               float128_is_quiet_nan(xb->f128, &env->fp_status)) {      \
2524        cc = CRF_SO;                                                    \
2525        if (ordered) {                                                  \
2526            vxvc_flag = true;                                           \
2527        }                                                               \
2528    }                                                                   \
2529    if (vxsnan_flag) {                                                  \
2530        float_invalid_op_vxsnan(env, GETPC());                          \
2531    }                                                                   \
2532    if (vxvc_flag) {                                                    \
2533        float_invalid_op_vxvc(env, 0, GETPC());                         \
2534    }                                                                   \
2535                                                                        \
2536    if (float128_lt(xa->f128, xb->f128, &env->fp_status)) {             \
2537        cc |= CRF_LT;                                                   \
2538    } else if (!float128_le(xa->f128, xb->f128, &env->fp_status)) {     \
2539        cc |= CRF_GT;                                                   \
2540    } else {                                                            \
2541        cc |= CRF_EQ;                                                   \
2542    }                                                                   \
2543                                                                        \
2544    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                \
2545    env->fpscr |= cc << FPSCR_FPRF;                                     \
2546    env->crf[BF(opcode)] = cc;                                          \
2547                                                                        \
2548    do_float_check_status(env, GETPC());                                \
2549}
2550
2551VSX_SCALAR_CMPQ(xscmpoqp, 1)
2552VSX_SCALAR_CMPQ(xscmpuqp, 0)
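/*
 * The ordered compares (xscmpodp, xscmpoqp) additionally raise VXVC when an
 * operand is a NaN (for signaling NaNs only if VE is clear), whereas the
 * unordered forms only flag VXSNAN for signaling NaNs; any NaN operand makes
 * the reported result "unordered" (CRF_SO).
 */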
2553
2554/*
2555 * VSX_MAX_MIN - VSX floating point maximum/minimum
2556 *   name  - instruction mnemonic
2557 *   op    - operation (max or min)
2558 *   nels  - number of elements (1, 2 or 4)
2559 *   tp    - type (float32 or float64)
2560 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2561 */
2562#define VSX_MAX_MIN(name, op, nels, tp, fld)                                  \
2563void helper_##name(CPUPPCState *env, ppc_vsr_t *xt,                           \
2564                   ppc_vsr_t *xa, ppc_vsr_t *xb)                              \
2565{                                                                             \
2566    ppc_vsr_t t = *xt;                                                        \
2567    int i;                                                                    \
2568                                                                              \
2569    for (i = 0; i < nels; i++) {                                              \
2570        t.fld = tp##_##op(xa->fld, xb->fld, &env->fp_status);                 \
2571        if (unlikely(tp##_is_signaling_nan(xa->fld, &env->fp_status) ||       \
2572                     tp##_is_signaling_nan(xb->fld, &env->fp_status))) {      \
2573            float_invalid_op_vxsnan(env, GETPC());                            \
2574        }                                                                     \
2575    }                                                                         \
2576                                                                              \
2577    *xt = t;                                                                  \
2578    do_float_check_status(env, GETPC());                                      \
2579}
2580
2581VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
2582VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
2583VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
2584VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
2585VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
2586VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
2587
2588#define VSX_MAX_MINC(name, max)                                               \
2589void helper_##name(CPUPPCState *env, uint32_t opcode,                         \
2590                   ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)               \
2591{                                                                             \
2592    ppc_vsr_t t = *xt;                                                        \
2593    bool vxsnan_flag = false, vex_flag = false;                               \
2594                                                                              \
2595    if (unlikely(float64_is_any_nan(xa->VsrD(0)) ||                           \
2596                 float64_is_any_nan(xb->VsrD(0)))) {                          \
2597        if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||         \
2598            float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {         \
2599            vxsnan_flag = true;                                               \
2600        }                                                                     \
2601        t.VsrD(0) = xb->VsrD(0);                                              \
2602    } else if ((max &&                                                        \
2603               !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) ||     \
2604               (!max &&                                                       \
2605               float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) {      \
2606        t.VsrD(0) = xa->VsrD(0);                                              \
2607    } else {                                                                  \
2608        t.VsrD(0) = xb->VsrD(0);                                              \
2609    }                                                                         \
2610                                                                              \
2611    vex_flag = fpscr_ve & vxsnan_flag;                                        \
2612    if (vxsnan_flag) {                                                        \
2613        float_invalid_op_vxsnan(env, GETPC());                                \
2614    }                                                                         \
2615    if (!vex_flag) {                                                          \
2616        *xt = t;                                                              \
2617    }                                                                         \
2618}                                                                             \
2619
2620VSX_MAX_MINC(xsmaxcdp, 1);
2621VSX_MAX_MINC(xsmincdp, 0);
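/*
 * The "C" (compare-style) max/min: if either operand is a NaN the result is
 * simply xb; a signaling NaN raises VXSNAN and, when VE is set, leaves the
 * target register unmodified.
 */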
2622
2623#define VSX_MAX_MINJ(name, max)                                               \
2624void helper_##name(CPUPPCState *env, uint32_t opcode,                         \
2625                   ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)               \
2626{                                                                             \
2627    ppc_vsr_t t = *xt;                                                        \
2628    bool vxsnan_flag = false, vex_flag = false;                               \
2629                                                                              \
2630    if (unlikely(float64_is_any_nan(xa->VsrD(0)))) {                          \
2631        if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status)) {         \
2632            vxsnan_flag = true;                                               \
2633        }                                                                     \
2634        t.VsrD(0) = xa->VsrD(0);                                              \
2635    } else if (unlikely(float64_is_any_nan(xb->VsrD(0)))) {                   \
2636        if (float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {         \
2637            vxsnan_flag = true;                                               \
2638        }                                                                     \
2639        t.VsrD(0) = xb->VsrD(0);                                              \
2640    } else if (float64_is_zero(xa->VsrD(0)) &&                                \
2641               float64_is_zero(xb->VsrD(0))) {                                \
2642        if (max) {                                                            \
2643            if (!float64_is_neg(xa->VsrD(0)) ||                               \
2644                !float64_is_neg(xb->VsrD(0))) {                               \
2645                t.VsrD(0) = 0ULL;                                             \
2646            } else {                                                          \
2647                t.VsrD(0) = 0x8000000000000000ULL;                            \
2648            }                                                                 \
2649        } else {                                                              \
2650            if (float64_is_neg(xa->VsrD(0)) ||                                \
2651                float64_is_neg(xb->VsrD(0))) {                                \
2652                t.VsrD(0) = 0x8000000000000000ULL;                            \
2653            } else {                                                          \
2654                t.VsrD(0) = 0ULL;                                             \
2655            }                                                                 \
2656        }                                                                     \
2657    } else if ((max &&                                                        \
2658               !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) ||     \
2659               (!max &&                                                       \
2660               float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) {      \
2661        t.VsrD(0) = xa->VsrD(0);                                              \
2662    } else {                                                                  \
2663        t.VsrD(0) = xb->VsrD(0);                                              \
2664    }                                                                         \
2665                                                                              \
2666    vex_flag = fpscr_ve & vxsnan_flag;                                        \
2667    if (vxsnan_flag) {                                                        \
2668        float_invalid_op_vxsnan(env, GETPC());                                \
2669    }                                                                         \
2670    if (!vex_flag) {                                                          \
2671        *xt = t;                                                              \
2672    }                                                                         \
2673}                                                                             \
2674
2675VSX_MAX_MINJ(xsmaxjdp, 1);
2676VSX_MAX_MINJ(xsminjdp, 0);
2677
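/*
 * Both families above suppress the register update when an SNaN is
 * seen while VE is set.  The "j" forms (xsmaxjdp/xsminjdp) propagate a
 * NaN operand (xa before xb) and resolve the sign of a zero result
 * explicitly: for example, xsmaxjdp(+0.0, -0.0) yields +0.0 while
 * xsminjdp(+0.0, -0.0) yields -0.0.
 */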
2678/*
2679 * VSX_CMP - VSX floating point compare
2680 *   op    - instruction mnemonic
2681 *   nels  - number of elements (1, 2 or 4)
2682 *   tp    - type (float32 or float64)
2683 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2684 *   cmp   - comparison operation
2685 *   svxvc - set VXVC bit
2686 *   exp   - expected result of comparison
2687 */
2688#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp)                       \
2689uint32_t helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                     \
2690                     ppc_vsr_t *xa, ppc_vsr_t *xb)                        \
2691{                                                                         \
2692    ppc_vsr_t t = *xt;                                                    \
2693    uint32_t crf6 = 0;                                                    \
2694    int i;                                                                \
2695    int all_true = 1;                                                     \
2696    int all_false = 1;                                                    \
2697                                                                          \
2698    for (i = 0; i < nels; i++) {                                          \
2699        if (unlikely(tp##_is_any_nan(xa->fld) ||                          \
2700                     tp##_is_any_nan(xb->fld))) {                         \
2701            if (tp##_is_signaling_nan(xa->fld, &env->fp_status) ||        \
2702                tp##_is_signaling_nan(xb->fld, &env->fp_status)) {        \
2703                float_invalid_op_vxsnan(env, GETPC());                    \
2704            }                                                             \
2705            if (svxvc) {                                                  \
2706                float_invalid_op_vxvc(env, 0, GETPC());                   \
2707            }                                                             \
2708            t.fld = 0;                                                    \
2709            all_true = 0;                                                 \
2710        } else {                                                          \
2711            if (tp##_##cmp(xb->fld, xa->fld, &env->fp_status) == exp) {   \
2712                t.fld = -1;                                               \
2713                all_false = 0;                                            \
2714            } else {                                                      \
2715                t.fld = 0;                                                \
2716                all_true = 0;                                             \
2717            }                                                             \
2718        }                                                                 \
2719    }                                                                     \
2720                                                                          \
2721    *xt = t;                                                              \
2722    crf6 = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);                  \
2723    return crf6;                                                          \
2724}
2725
2726VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
2727VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
2728VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
2729VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
2730VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
2731VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
2732VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
2733VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
2734
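/*
 * For exposition only: how the CR field returned by the VSX_CMP
 * helpers above is typically interpreted.  Bit 0x8 means every element
 * compared true, bit 0x2 means every element compared false.  These
 * two helpers are hypothetical and not used by the translator.
 */
static inline bool vsx_cmp_all_true(uint32_t crf6)
{
    return (crf6 & 0x8) != 0;
}

static inline bool vsx_cmp_all_false(uint32_t crf6)
{
    return (crf6 & 0x2) != 0;
}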
2735/*
2736 * VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
2737 *   op    - instruction mnemonic
2738 *   nels  - number of elements (1, 2 or 4)
2739 *   stp   - source type (float32 or float64)
2740 *   ttp   - target type (float32 or float64)
2741 *   sfld  - source vsr_t field
2742 *   tfld  - target vsr_t field (f32 or f64)
2743 *   sfprf - set FPRF
2744 */
2745#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf)    \
2746void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)   \
2747{                                                                  \
2748    ppc_vsr_t t = *xt;                                             \
2749    int i;                                                         \
2750                                                                   \
2751    for (i = 0; i < nels; i++) {                                   \
2752        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);        \
2753        if (unlikely(stp##_is_signaling_nan(xb->sfld,              \
2754                                            &env->fp_status))) {   \
2755            float_invalid_op_vxsnan(env, GETPC());                 \
2756            t.tfld = ttp##_snan_to_qnan(t.tfld);                   \
2757        }                                                          \
2758        if (sfprf) {                                               \
2759            helper_compute_fprf_##ttp(env, t.tfld);                \
2760        }                                                          \
2761    }                                                              \
2762                                                                   \
2763    *xt = t;                                                       \
2764    do_float_check_status(env, GETPC());                           \
2765}
2766
2767VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
2768VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
2769VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2 * i), 0)
2770VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)
2771
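/*
 * For exposition, the scalar double -> single case of the macro above,
 * written out by hand; the helper name is hypothetical and the FPRF
 * and status-flag bookkeeping of the real helper is omitted.
 */
static inline uint32_t example_f64_to_f32(CPUPPCState *env, uint64_t f64)
{
    uint32_t f32 = float64_to_float32(f64, &env->fp_status);

    if (unlikely(float64_is_signaling_nan(f64, &env->fp_status))) {
        /* Quiet the propagated NaN, as ttp##_snan_to_qnan() does above. */
        f32 = float32_snan_to_qnan(f32);
    }
    return f32;
}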
2772/*
2773 * VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
2774 *   op    - instruction mnemonic
2775 *   nels  - number of elements (1, 2 or 4)
2776 *   stp   - source type (float32 or float64)
2777 *   ttp   - target type (float32 or float64)
2778 *   sfld  - source vsr_t field
2779 *   tfld  - target vsr_t field (f32 or f64)
2780 *   sfprf - set FPRF
2781 */
2782#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf)    \
2783void helper_##op(CPUPPCState *env, uint32_t opcode,                       \
2784                 ppc_vsr_t *xt, ppc_vsr_t *xb)                            \
2785{                                                                       \
2786    ppc_vsr_t t = *xt;                                                  \
2787    int i;                                                              \
2788                                                                        \
2789    for (i = 0; i < nels; i++) {                                        \
2790        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);             \
2791        if (unlikely(stp##_is_signaling_nan(xb->sfld,                   \
2792                                            &env->fp_status))) {        \
2793            float_invalid_op_vxsnan(env, GETPC());                      \
2794            t.tfld = ttp##_snan_to_qnan(t.tfld);                        \
2795        }                                                               \
2796        if (sfprf) {                                                    \
2797            helper_compute_fprf_##ttp(env, t.tfld);                     \
2798        }                                                               \
2799    }                                                                   \
2800                                                                        \
2801    *xt = t;                                                            \
2802    do_float_check_status(env, GETPC());                                \
2803}
2804
2805VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
2806
2807/*
2808 * VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
2809 *                       involving one half precision value
2810 *   op    - instruction mnemonic
2811 *   nels  - number of elements (1, 2 or 4)
2812 *   stp   - source type
2813 *   ttp   - target type
2814 *   sfld  - source vsr_t field
2815 *   tfld  - target vsr_t field
2816 *   sfprf - set FPRF
2817 */
2818#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf) \
2819void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)   \
2820{                                                                  \
2821    ppc_vsr_t t = { };                                             \
2822    int i;                                                         \
2823                                                                   \
2824    for (i = 0; i < nels; i++) {                                   \
2825        t.tfld = stp##_to_##ttp(xb->sfld, 1, &env->fp_status);     \
2826        if (unlikely(stp##_is_signaling_nan(xb->sfld,              \
2827                                            &env->fp_status))) {   \
2828            float_invalid_op_vxsnan(env, GETPC());                 \
2829            t.tfld = ttp##_snan_to_qnan(t.tfld);                   \
2830        }                                                          \
2831        if (sfprf) {                                               \
2832            helper_compute_fprf_##ttp(env, t.tfld);                \
2833        }                                                          \
2834    }                                                              \
2835                                                                   \
2836    *xt = t;                                                       \
2837    do_float_check_status(env, GETPC());                           \
2838}
2839
2840VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
2841VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
2842VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0)
2843VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
2844
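/*
 * The extra "1" passed to the conversions above is the "ieee" flag of
 * this softfloat version's half-precision routines: the operand is
 * treated as an IEEE 754 binary16 value rather than the alternative
 * half-precision encoding.
 */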
2845/*
2846 * xscvqpdp isn't using VSX_CVT_FP_TO_FP() because xscvqpdpo, the
2847 * round-to-odd form selected via the Rc bit, is handled here as well.
2848 */
2849void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode,
2850                     ppc_vsr_t *xt, ppc_vsr_t *xb)
2851{
2852    ppc_vsr_t t = { };
2853    float_status tstat;
2854
2855    tstat = env->fp_status;
2856    if (unlikely(Rc(opcode) != 0)) {
2857        tstat.float_rounding_mode = float_round_to_odd;
2858    }
2859
2860    t.VsrD(0) = float128_to_float64(xb->f128, &tstat);
2861    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
2862    if (unlikely(float128_is_signaling_nan(xb->f128, &tstat))) {
2863        float_invalid_op_vxsnan(env, GETPC());
2864        t.VsrD(0) = float64_snan_to_qnan(t.VsrD(0));
2865    }
2866    helper_compute_fprf_float64(env, t.VsrD(0));
2867
2868    *xt = t;
2869    do_float_check_status(env, GETPC());
2870}
2871
2872uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
2873{
2874    float_status tstat = env->fp_status;
2875    set_float_exception_flags(0, &tstat);
2876
2877    return (uint64_t)float64_to_float32(xb, &tstat) << 32;
2878}
2879
2880uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
2881{
2882    float_status tstat = env->fp_status;
2883    set_float_exception_flags(0, &tstat);
2884
2885    return float32_to_float64(xb >> 32, &tstat);
2886}
2887
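/*
 * xscvdpspn and xscvspdpn are the Non-signalling conversion forms:
 * they convert on a scratch float_status with the exception flags
 * cleared and never copy those flags back, so FPSCR is left untouched.
 */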
2888/*
2889 * VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
2890 *   op    - instruction mnemonic
2891 *   nels  - number of elements (1, 2 or 4)
2892 *   stp   - source type (float32 or float64)
2893 *   ttp   - target type (int32, uint32, int64 or uint64)
2894 *   sfld  - source vsr_t field
2895 *   tfld  - target vsr_t field
2896 *   rnan  - resulting NaN
2897 */
2898#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan)              \
2899void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
2900{                                                                            \
2901    int all_flags = env->fp_status.float_exception_flags, flags;             \
2902    ppc_vsr_t t = *xt;                                                       \
2903    int i;                                                                   \
2904                                                                             \
2905    for (i = 0; i < nels; i++) {                                             \
2906        env->fp_status.float_exception_flags = 0;                            \
2907        t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status);  \
2908        flags = env->fp_status.float_exception_flags;                        \
2909        if (unlikely(flags & float_flag_invalid)) {                          \
2910            float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld));    \
2911            t.tfld = rnan;                                                   \
2912        }                                                                    \
2913        all_flags |= flags;                                                  \
2914    }                                                                        \
2915                                                                             \
2916    *xt = t;                                                                 \
2917    env->fp_status.float_exception_flags = all_flags;                        \
2918    do_float_check_status(env, GETPC());                                     \
2919}
2920
2921VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
2922                  0x8000000000000000ULL)
2923VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
2924                  0x80000000U)
2925VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
2926VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
2927VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
2928                  0x8000000000000000ULL)
2929VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2 * i), \
2930                  0x80000000U)
2931VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
2932VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2 * i), 0U)
2933VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i), \
2934                  0x8000000000000000ULL)
2935VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
2936VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), 0ULL)
2937VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
2938
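/*
 * For exposition, the scalar double -> int32 case of the macro above
 * (as instantiated for xscvdpsxws), showing the replace-on-invalid
 * behaviour; the helper name is hypothetical and the flag accumulation
 * and float_invalid_cvt() reporting of the real helper are omitted.
 */
static inline uint32_t example_f64_to_i32(CPUPPCState *env, uint64_t f64)
{
    uint32_t r;

    env->fp_status.float_exception_flags = 0;
    r = float64_to_int32_round_to_zero(f64, &env->fp_status);
    if (unlikely(env->fp_status.float_exception_flags & float_flag_invalid)) {
        /* NaN or out-of-range input: substitute the rnan pattern. */
        r = 0x80000000U;
    }
    return r;
}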
2939/*
2940 * VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
2941 *   op    - instruction mnemonic
2942 *   stp   - source type (float32 or float64)
2943 *   ttp   - target type (int32, uint32, int64 or uint64)
2944 *   sfld  - source vsr_t field
2945 *   tfld  - target vsr_t field
2946 *   rnan  - resulting NaN
2947 */
2948#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan)             \
2949void helper_##op(CPUPPCState *env, uint32_t opcode,                          \
2950                 ppc_vsr_t *xt, ppc_vsr_t *xb)                               \
2951{                                                                            \
2952    ppc_vsr_t t = { };                                                       \
2953                                                                             \
2954    t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status);      \
2955    if (env->fp_status.float_exception_flags & float_flag_invalid) {         \
2956        float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld));        \
2957        t.tfld = rnan;                                                       \
2958    }                                                                        \
2959                                                                             \
2960    *xt = t;                                                                 \
2961    do_float_check_status(env, GETPC());                                     \
2962}
2963
2964VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0),          \
2965                  0x8000000000000000ULL)
2966
2967VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0),          \
2968                  0xffffffff80000000ULL)
2969VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
2970VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
2971
2972/*
2973 * VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
2974 *   op    - instruction mnemonic
2975 *   nels  - number of elements (1, 2 or 4)
2976 *   stp   - source type (int32, uint32, int64 or uint64)
2977 *   ttp   - target type (float32 or float64)
2978 *   sfld  - source vsr_t field
2979 *   tfld  - target vsr_t field
2980 *   sfprf - set FPRF
2981 *   r2sp  - round the result to single precision
2982 */
2983#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp)  \
2984void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)        \
2985{                                                                       \
2986    ppc_vsr_t t = *xt;                                                  \
2987    int i;                                                              \
2988                                                                        \
2989    for (i = 0; i < nels; i++) {                                        \
2990        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);             \
2991        if (r2sp) {                                                     \
2992            t.tfld = helper_frsp(env, t.tfld);                          \
2993        }                                                               \
2994        if (sfprf) {                                                    \
2995            helper_compute_fprf_float64(env, t.tfld);                   \
2996        }                                                               \
2997    }                                                                   \
2998                                                                        \
2999    *xt = t;                                                            \
3000    do_float_check_status(env, GETPC());                                \
3001}
3002
3003VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
3004VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
3005VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
3006VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
3007VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
3008VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
3009VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2 * i), VsrD(i), 0, 0)
3010VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2 * i), VsrD(i), 0, 0)
3011VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2 * i), 0, 0)
3012VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2 * i), 0, 0)
3013VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
3014VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
3015
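/*
 * When r2sp is set (the xscvsxdsp/xscvuxdsp instantiations above), the
 * value is converted to double precision and then rounded to single
 * precision with helper_frsp(), while remaining in double-precision
 * format in the target register.
 */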
3016/*
3017 * VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
3018 *   op    - instruction mnemonic
3019 *   stp   - source type (int32, uint32, int64 or uint64)
3020 *   ttp   - target type (float32 or float64)
3021 *   sfld  - source vsr_t field
3022 *   tfld  - target vsr_t field
3023 */
3024#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld)              \
3025void helper_##op(CPUPPCState *env, uint32_t opcode,                     \
3026                 ppc_vsr_t *xt, ppc_vsr_t *xb)                          \
3027{                                                                       \
3028    ppc_vsr_t t = *xt;                                                  \
3029                                                                        \
3030    t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);                 \
3031    helper_compute_fprf_##ttp(env, t.tfld);                             \
3032                                                                        \
3033    *xt = t;                                                            \
3034    do_float_check_status(env, GETPC());                                \
3035}
3036
3037VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
3038VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
3039
3040/*
3041 * For "use current rounding mode", define a value that will not be
3042 * one of the existing rounding mode enums.
3043 */
3044#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
3045  float_round_up + float_round_to_zero)
3046
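/*
 * Summing the modes works as a sentinel because the softfloat rounding
 * modes are small consecutive enum values, so the sum is larger than,
 * and therefore distinct from, any individual mode.
 */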
3047/*
3048 * VSX_ROUND - VSX floating point round
3049 *   op    - instruction mnemonic
3050 *   nels  - number of elements (1, 2 or 4)
3051 *   tp    - type (float32 or float64)
3052 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
3053 *   rmode - rounding mode
3054 *   sfprf - set FPRF
3055 */
3056#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf)                     \
3057void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)       \
3058{                                                                      \
3059    ppc_vsr_t t = *xt;                                                 \
3060    int i;                                                             \
3061                                                                       \
3062    if (rmode != FLOAT_ROUND_CURRENT) {                                \
3063        set_float_rounding_mode(rmode, &env->fp_status);               \
3064    }                                                                  \
3065                                                                       \
3066    for (i = 0; i < nels; i++) {                                       \
3067        if (unlikely(tp##_is_signaling_nan(xb->fld,                    \
3068                                           &env->fp_status))) {        \
3069            float_invalid_op_vxsnan(env, GETPC());                     \
3070            t.fld = tp##_snan_to_qnan(xb->fld);                        \
3071        } else {                                                       \
3072            t.fld = tp##_round_to_int(xb->fld, &env->fp_status);       \
3073        }                                                              \
3074        if (sfprf) {                                                   \
3075            helper_compute_fprf_float64(env, t.fld);                   \
3076        }                                                              \
3077    }                                                                  \
3078                                                                       \
3079    /*                                                                 \
3080     * If this is not a "use current rounding mode" instruction,       \
3081     * then inhibit setting of the XX bit and restore rounding         \
3082     * mode from FPSCR                                                 \
3083     */                                                                \
3084    if (rmode != FLOAT_ROUND_CURRENT) {                                \
3085        fpscr_set_rounding_mode(env);                                  \
3086        env->fp_status.float_exception_flags &= ~float_flag_inexact;   \
3087    }                                                                  \
3088                                                                       \
3089    *xt = t;                                                           \
3090    do_float_check_status(env, GETPC());                               \
3091}
3092
3093VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
3094VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
3095VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
3096VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
3097VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)
3098
3099VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
3100VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
3101VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
3102VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
3103VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)
3104
3105VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
3106VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
3107VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
3108VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
3109VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
3110
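/*
 * Suffix convention for the round-to-integer forms above:
 *   i  - round to nearest, ties away from zero
 *   ic - use the current FPSCR rounding mode (and leave XX visible)
 *   im - round toward -infinity
 *   ip - round toward +infinity
 *   iz - round toward zero
 */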
3111uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
3112{
3113    helper_reset_fpstatus(env);
3114
3115    uint64_t xt = helper_frsp(env, xb);
3116
3117    helper_compute_fprf_float64(env, xt);
3118    do_float_check_status(env, GETPC());
3119    return xt;
3120}
3121
3122#define VSX_XXPERM(op, indexed)                                       \
3123void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                     \
3124                 ppc_vsr_t *xa, ppc_vsr_t *pcv)                       \
3125{                                                                     \
3126    ppc_vsr_t t = *xt;                                                \
3127    int i, idx;                                                       \
3128                                                                      \
3129    for (i = 0; i < 16; i++) {                                        \
3130        idx = pcv->VsrB(i) & 0x1F;                                    \
3131        if (indexed) {                                                \
3132            idx = 31 - idx;                                           \
3133        }                                                             \
3134        t.VsrB(i) = (idx <= 15) ? xa->VsrB(idx)                       \
3135                                : xt->VsrB(idx - 16);                 \
3136    }                                                                 \
3137    *xt = t;                                                          \
3138}
3139
3140VSX_XXPERM(xxperm, 0)
3141VSX_XXPERM(xxpermr, 1)
3142
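/*
 * In xxperm/xxpermr above, result byte i is selected by the low five
 * bits of byte i of the permute control vector: indices 0..15 take
 * bytes from xa, indices 16..31 take bytes from the original xt.
 * xxpermr (indexed == 1) uses the complemented index 31 - idx.
 */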
3143void helper_xvxsigsp(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)
3144{
3145    ppc_vsr_t t = { };
3146    uint32_t exp, i, fraction;
3147
3148    for (i = 0; i < 4; i++) {
3149        exp = (xb->VsrW(i) >> 23) & 0xFF;
3150        fraction = xb->VsrW(i) & 0x7FFFFF;
3151        if (exp != 0 && exp != 255) {
3152            t.VsrW(i) = fraction | 0x00800000;
3153        } else {
3154            t.VsrW(i) = fraction;
3155        }
3156    }
3157    *xt = t;
3158}
3159
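/*
 * xvxsigsp extracts the significand of each single-precision word,
 * making the implicit integer bit explicit for normal numbers: 1.0f
 * (0x3F800000) yields 0x00800000, while zeros, denormals, infinities
 * and NaNs keep only their fraction bits.
 */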
3160/*
3161 * VSX_TEST_DC - VSX floating point test data class
3162 *   op    - instruction mnemonic
3163 *   nels  - number of elements (1, 2 or 4)
3164 *   xbn   - VSR register number
3165 *   tp    - type (float32 or float64)
3166 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
3167 *   tfld   - target vsr_t field (VsrD(*) or VsrW(*))
3168 *   fld_max - target field max
3169 *   scrf - set result in CR and FPCC
3170 */
3171#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf)  \
3172void helper_##op(CPUPPCState *env, uint32_t opcode)         \
3173{                                                           \
3174    ppc_vsr_t *xt = &env->vsr[xT(opcode)];                  \
3175    ppc_vsr_t *xb = &env->vsr[xbn];                         \
3176    ppc_vsr_t t = { };                                      \
3177    uint32_t i, sign, dcmx;                                 \
3178    uint32_t cc, match = 0;                                 \
3179                                                            \
3180    if (!scrf) {                                            \
3181        dcmx = DCMX_XV(opcode);                             \
3182    } else {                                                \
3183        t = *xt;                                            \
3184        dcmx = DCMX(opcode);                                \
3185    }                                                       \
3186                                                            \
3187    for (i = 0; i < nels; i++) {                            \
3188        sign = tp##_is_neg(xb->fld);                        \
3189        if (tp##_is_any_nan(xb->fld)) {                     \
3190            match = extract32(dcmx, 6, 1);                  \
3191        } else if (tp##_is_infinity(xb->fld)) {             \
3192            match = extract32(dcmx, 4 + !sign, 1);          \
3193        } else if (tp##_is_zero(xb->fld)) {                 \
3194            match = extract32(dcmx, 2 + !sign, 1);          \
3195        } else if (tp##_is_zero_or_denormal(xb->fld)) {     \
3196            match = extract32(dcmx, 0 + !sign, 1);          \
3197        }                                                   \
3198                                                            \
3199        if (scrf) {                                         \
3200            cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT;  \
3201            env->fpscr &= ~(0x0F << FPSCR_FPRF);            \
3202            env->fpscr |= cc << FPSCR_FPRF;                 \
3203            env->crf[BF(opcode)] = cc;                      \
3204        } else {                                            \
3205            t.tfld = match ? fld_max : 0;                   \
3206        }                                                   \
3207        match = 0;                                          \
3208    }                                                       \
3209    if (!scrf) {                                            \
3210        *xt = t;                                            \
3211    }                                                       \
3212}
3213
3214VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0)
3215VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
3216VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
3217VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)
3218
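/*
 * DCMX bit assignment used by the data-class tests above, as encoded
 * by the extract32() calls in VSX_TEST_DC:
 *   bit 6 - NaN          bit 5 - +Infinity    bit 4 - -Infinity
 *   bit 3 - +Zero        bit 2 - -Zero
 *   bit 1 - +Denormal    bit 0 - -Denormal
 */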
3219void helper_xststdcsp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb)
3220{
3221    uint32_t dcmx, sign, exp;
3222    uint32_t cc, match = 0, not_sp = 0;
3223
3224    dcmx = DCMX(opcode);
3225    exp = (xb->VsrD(0) >> 52) & 0x7FF;
3226
3227    sign = float64_is_neg(xb->VsrD(0));
3228    if (float64_is_any_nan(xb->VsrD(0))) {
3229        match = extract32(dcmx, 6, 1);
3230    } else if (float64_is_infinity(xb->VsrD(0))) {
3231        match = extract32(dcmx, 4 + !sign, 1);
3232    } else if (float64_is_zero(xb->VsrD(0))) {
3233        match = extract32(dcmx, 2 + !sign, 1);
3234    } else if (float64_is_zero_or_denormal(xb->VsrD(0)) ||
3235               (exp > 0 && exp < 0x381)) {
3236        match = extract32(dcmx, 0 + !sign, 1);
3237    }
3238
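    /*
     * not_sp is set when the value does not survive a round trip
     * through single precision, i.e. it is not exactly representable
     * as an SP number; the exp check above additionally matches
     * magnitudes that would be denormal (or underflow) in SP.
     */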
3239    not_sp = !float64_eq(xb->VsrD(0),
3240                         float32_to_float64(
3241                             float64_to_float32(xb->VsrD(0), &env->fp_status),
3242                             &env->fp_status), &env->fp_status);
3243
3244    cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
3245    env->fpscr &= ~(0x0F << FPSCR_FPRF);
3246    env->fpscr |= cc << FPSCR_FPRF;
3247    env->crf[BF(opcode)] = cc;
3248}
3249
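/*
 * helper_xsrqpi and helper_xsrqpxp below derive their rounding mode
 * from the R bit and the two-bit RMC field of the opcode:
 *   R=0 RMC=0 - round to nearest, ties away from zero
 *   R=0 RMC=3 - use the current FPSCR rounding mode
 *   R=1 RMC=0 - round to nearest even
 *   R=1 RMC=1 - round toward zero
 *   R=1 RMC=2 - round toward +infinity
 *   R=1 RMC=3 - round toward -infinity
 */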
3250void helper_xsrqpi(CPUPPCState *env, uint32_t opcode,
3251                   ppc_vsr_t *xt, ppc_vsr_t *xb)
3252{
3253    ppc_vsr_t t = { };
3254    uint8_t r = Rrm(opcode);
3255    uint8_t ex = Rc(opcode);
3256    uint8_t rmc = RMC(opcode);
3257    uint8_t rmode = 0;
3258    float_status tstat;
3259
3260    helper_reset_fpstatus(env);
3261
3262    if (r == 0 && rmc == 0) {
3263        rmode = float_round_ties_away;
3264    } else if (r == 0 && rmc == 0x3) {
3265        rmode = fpscr_rn;
3266    } else if (r == 1) {
3267        switch (rmc) {
3268        case 0:
3269            rmode = float_round_nearest_even;
3270            break;
3271        case 1:
3272            rmode = float_round_to_zero;
3273            break;
3274        case 2:
3275            rmode = float_round_up;
3276            break;
3277        case 3:
3278            rmode = float_round_down;
3279            break;
3280        default:
3281            abort();
3282        }
3283    }
3284
3285    tstat = env->fp_status;
3286    set_float_exception_flags(0, &tstat);
3287    set_float_rounding_mode(rmode, &tstat);
3288    t.f128 = float128_round_to_int(xb->f128, &tstat);
3289    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3290
3291    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3292        if (float128_is_signaling_nan(xb->f128, &tstat)) {
3293            float_invalid_op_vxsnan(env, GETPC());
3294            t.f128 = float128_snan_to_qnan(t.f128);
3295        }
3296    }
3297
3298    if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
3299        env->fp_status.float_exception_flags &= ~float_flag_inexact;
3300    }
3301
3302    helper_compute_fprf_float128(env, t.f128);
3303    do_float_check_status(env, GETPC());
3304    *xt = t;
3305}
3306
3307void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode,
3308                    ppc_vsr_t *xt, ppc_vsr_t *xb)
3309{
3310    ppc_vsr_t t = { };
3311    uint8_t r = Rrm(opcode);
3312    uint8_t rmc = RMC(opcode);
3313    uint8_t rmode = 0;
3314    floatx80 round_res;
3315    float_status tstat;
3316
3317    helper_reset_fpstatus(env);
3318
3319    if (r == 0 && rmc == 0) {
3320        rmode = float_round_ties_away;
3321    } else if (r == 0 && rmc == 0x3) {
3322        rmode = fpscr_rn;
3323    } else if (r == 1) {
3324        switch (rmc) {
3325        case 0:
3326            rmode = float_round_nearest_even;
3327            break;
3328        case 1:
3329            rmode = float_round_to_zero;
3330            break;
3331        case 2:
3332            rmode = float_round_up;
3333            break;
3334        case 3:
3335            rmode = float_round_down;
3336            break;
3337        default:
3338            abort();
3339        }
3340    }
3341
3342    tstat = env->fp_status;
3343    set_float_exception_flags(0, &tstat);
3344    set_float_rounding_mode(rmode, &tstat);
3345    round_res = float128_to_floatx80(xb->f128, &tstat);
3346    t.f128 = floatx80_to_float128(round_res, &tstat);
3347    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3348
3349    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3350        if (float128_is_signaling_nan(xb->f128, &tstat)) {
3351            float_invalid_op_vxsnan(env, GETPC());
3352            t.f128 = float128_snan_to_qnan(t.f128);
3353        }
3354    }
3355
3356    helper_compute_fprf_float128(env, t.f128);
3357    *xt = t;
3358    do_float_check_status(env, GETPC());
3359}
3360
3361void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode,
3362                     ppc_vsr_t *xt, ppc_vsr_t *xb)
3363{
3364    ppc_vsr_t t = { };
3365    float_status tstat;
3366
3367    helper_reset_fpstatus(env);
3368
3369    tstat = env->fp_status;
3370    if (unlikely(Rc(opcode) != 0)) {
3371        tstat.float_rounding_mode = float_round_to_odd;
3372    }
3373
3374    set_float_exception_flags(0, &tstat);
3375    t.f128 = float128_sqrt(xb->f128, &tstat);
3376    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3377
3378    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3379        if (float128_is_signaling_nan(xb->f128, &tstat)) {
3380            float_invalid_op_vxsnan(env, GETPC());
3381            t.f128 = float128_snan_to_qnan(xb->f128);
3382        } else if (float128_is_quiet_nan(xb->f128, &tstat)) {
3383            t.f128 = xb->f128;
3384        } else if (float128_is_neg(xb->f128) && !float128_is_zero(xb->f128)) {
3385            float_invalid_op_vxsqrt(env, 1, GETPC());
3386            t.f128 = float128_default_nan(&env->fp_status);
3387        }
3388    }
3389
3390    helper_compute_fprf_float128(env, t.f128);
3391    *xt = t;
3392    do_float_check_status(env, GETPC());
3393}
3394
3395void helper_xssubqp(CPUPPCState *env, uint32_t opcode,
3396                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
3397{
3398    ppc_vsr_t t = *xt;
3399    float_status tstat;
3400
3401    helper_reset_fpstatus(env);
3402
3403    tstat = env->fp_status;
3404    if (unlikely(Rc(opcode) != 0)) {
3405        tstat.float_rounding_mode = float_round_to_odd;
3406    }
3407
3408    set_float_exception_flags(0, &tstat);
3409    t.f128 = float128_sub(xa->f128, xb->f128, &tstat);
3410    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3411
3412    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3413        float_invalid_op_addsub(env, 1, GETPC(),
3414                                float128_classify(xa->f128) |
3415                                float128_classify(xb->f128));
3416    }
3417
3418    helper_compute_fprf_float128(env, t.f128);
3419    *xt = t;
3420    do_float_check_status(env, GETPC());
3421}
3422