qemu/target/ppc/fpu_helper.c
   1/*
   2 *  PowerPC floating point and SPE emulation helpers for QEMU.
   3 *
   4 *  Copyright (c) 2003-2007 Jocelyn Mayer
   5 *
   6 * This library is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU Lesser General Public
   8 * License as published by the Free Software Foundation; either
   9 * version 2 of the License, or (at your option) any later version.
  10 *
  11 * This library is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14 * Lesser General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU Lesser General Public
  17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  18 */
  19#include "qemu/osdep.h"
  20#include "cpu.h"
  21#include "exec/helper-proto.h"
  22#include "exec/exec-all.h"
  23#include "internal.h"
  24#include "fpu/softfloat.h"
  25
  26static inline float128 float128_snan_to_qnan(float128 x)
  27{
  28    float128 r;
  29
  30    r.high = x.high | 0x0000800000000000;
  31    r.low = x.low;
  32    return r;
  33}
  34
  35#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
  36#define float32_snan_to_qnan(x) ((x) | 0x00400000)
  37#define float16_snan_to_qnan(x) ((x) | 0x0200)
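
/*
 * These helpers quiet an sNaN by setting the most-significant fraction
 * bit, following the IEEE 754-2008 convention used by Power (the
 * snan_bit_is_one == 0 case).  For example, the float32 sNaN
 * 0x7f800001 becomes the qNaN 0x7fc00001.
 */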
  38
  39static inline bool fp_exceptions_enabled(CPUPPCState *env)
  40{
  41#ifdef CONFIG_USER_ONLY
  42    return true;
  43#else
  44    return (env->msr & ((1U << MSR_FE0) | (1U << MSR_FE1))) != 0;
  45#endif
  46}
  47
  48/*****************************************************************************/
  49/* Floating point operations helpers */
  50
/*
 * This is the non-arithmetic conversion that happens e.g. on loads.
 * In the Power ISA pseudocode, this is called DOUBLE.
 */
  55uint64_t helper_todouble(uint32_t arg)
  56{
  57    uint32_t abs_arg = arg & 0x7fffffff;
  58    uint64_t ret;
  59
  60    if (likely(abs_arg >= 0x00800000)) {
  61        if (unlikely(extract32(arg, 23, 8) == 0xff)) {
  62            /* Inf or NAN.  */
  63            ret  = (uint64_t)extract32(arg, 31, 1) << 63;
  64            ret |= (uint64_t)0x7ff << 52;
  65            ret |= (uint64_t)extract32(arg, 0, 23) << 29;
  66        } else {
  67            /* Normalized operand.  */
  68            ret  = (uint64_t)extract32(arg, 30, 2) << 62;
  69            ret |= ((extract32(arg, 30, 1) ^ 1) * (uint64_t)7) << 59;
  70            ret |= (uint64_t)extract32(arg, 0, 30) << 29;
  71        }
  72    } else {
  73        /* Zero or Denormalized operand.  */
  74        ret = (uint64_t)extract32(arg, 31, 1) << 63;
  75        if (unlikely(abs_arg != 0)) {
  76            /*
  77             * Denormalized operand.
  78             * Shift fraction so that the msb is in the implicit bit position.
  79             * Thus, shift is in the range [1:23].
  80             */
  81            int shift = clz32(abs_arg) - 8;
  82            /*
  83             * The first 3 terms compute the float64 exponent.  We then bias
  84             * this result by -1 so that we can swallow the implicit bit below.
  85             */
  86            int exp = -126 - shift + 1023 - 1;
  87
  88            ret |= (uint64_t)exp << 52;
  89            ret += (uint64_t)abs_arg << (52 - 23 + shift);
  90        }
  91    }
  92    return ret;
  93}
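
/*
 * Worked example: helper_todouble(0x3f800000), i.e. 1.0f, yields
 * 0x3ff0000000000000 (1.0): bits 63:62 are copied from the sign and
 * exponent MSB, bits 61:59 are filled with the complement of the
 * exponent MSB, and bits 58:29 are the low exponent and fraction bits
 * shifted up, leaving the 23-bit fraction in bits 51:29.
 */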
  94
/*
 * This is the non-arithmetic conversion that happens e.g. on stores.
 * In the Power ISA pseudocode, this is called SINGLE.
 */
  99uint32_t helper_tosingle(uint64_t arg)
 100{
 101    int exp = extract64(arg, 52, 11);
 102    uint32_t ret;
 103
 104    if (likely(exp > 896)) {
 105        /* No denormalization required (includes Inf, NaN).  */
 106        ret  = extract64(arg, 62, 2) << 30;
 107        ret |= extract64(arg, 29, 30);
 108    } else {
 109        /*
 110         * Zero or Denormal result.  If the exponent is in bounds for
 111         * a single-precision denormal result, extract the proper
 112         * bits.  If the input is not zero, and the exponent is out of
 113         * bounds, then the result is undefined; this underflows to
 114         * zero.
 115         */
 116        ret = extract64(arg, 63, 1) << 31;
 117        if (unlikely(exp >= 874)) {
 118            /* Denormal result.  */
 119            ret |= ((1ULL << 52) | extract64(arg, 0, 52)) >> (896 + 30 - exp);
 120        }
 121    }
 122    return ret;
 123}
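
/*
 * The threshold 896 is the difference of the exponent biases
 * (1023 - 127), so exponents above it need no denormalization and the
 * exponent/fraction bits can simply be repacked.  Worked example:
 * helper_tosingle(0x3ff0000000000000) returns 0x3f800000 (1.0 -> 1.0f).
 */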
 124
 125static inline int ppc_float32_get_unbiased_exp(float32 f)
 126{
 127    return ((f >> 23) & 0xFF) - 127;
 128}
 129
 130static inline int ppc_float64_get_unbiased_exp(float64 f)
 131{
 132    return ((f >> 52) & 0x7FF) - 1023;
 133}
 134
 135/* Classify a floating-point number.  */
 136enum {
 137    is_normal   = 1,
 138    is_zero     = 2,
 139    is_denormal = 4,
 140    is_inf      = 8,
 141    is_qnan     = 16,
 142    is_snan     = 32,
 143    is_neg      = 64,
 144};
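
/*
 * The class bits are one-hot so that the classes of two operands can be
 * OR-ed together when diagnosing an invalid operation (see
 * float_invalid_op_addsub() below), and so that ctz32() recovers a row
 * index into the FPRF table in set_fprf_from_class().
 */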
 145
 146#define COMPUTE_CLASS(tp)                                      \
 147static int tp##_classify(tp arg)                               \
 148{                                                              \
 149    int ret = tp##_is_neg(arg) * is_neg;                       \
 150    if (unlikely(tp##_is_any_nan(arg))) {                      \
 151        float_status dummy = { };  /* snan_bit_is_one = 0 */   \
 152        ret |= (tp##_is_signaling_nan(arg, &dummy)             \
 153                ? is_snan : is_qnan);                          \
 154    } else if (unlikely(tp##_is_infinity(arg))) {              \
 155        ret |= is_inf;                                         \
 156    } else if (tp##_is_zero(arg)) {                            \
 157        ret |= is_zero;                                        \
 158    } else if (tp##_is_zero_or_denormal(arg)) {                \
 159        ret |= is_denormal;                                    \
 160    } else {                                                   \
 161        ret |= is_normal;                                      \
 162    }                                                          \
 163    return ret;                                                \
 164}
 165
 166COMPUTE_CLASS(float16)
 167COMPUTE_CLASS(float32)
 168COMPUTE_CLASS(float64)
 169COMPUTE_CLASS(float128)
 170
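/*
 * The table below encodes the Power FPRF field: bit 4 is FPSCR[C] and
 * the low four bits are the FPCC condition bits FL, FG, FE, FU.  For
 * example, 0x08 is "negative normal" (FL set) and 0x11 is "QNaN"
 * (C and FU set).
 */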
 171static void set_fprf_from_class(CPUPPCState *env, int class)
 172{
 173    static const uint8_t fprf[6][2] = {
 174        { 0x04, 0x08 },  /* normalized */
 175        { 0x02, 0x12 },  /* zero */
 176        { 0x14, 0x18 },  /* denormalized */
 177        { 0x05, 0x09 },  /* infinity */
 178        { 0x11, 0x11 },  /* qnan */
 179        { 0x00, 0x00 },  /* snan -- flags are undefined */
 180    };
 181    bool isneg = class & is_neg;
 182
 183    env->fpscr &= ~FP_FPRF;
 184    env->fpscr |= fprf[ctz32(class)][isneg] << FPSCR_FPRF;
 185}
 186
 187#define COMPUTE_FPRF(tp)                                \
 188void helper_compute_fprf_##tp(CPUPPCState *env, tp arg) \
 189{                                                       \
 190    set_fprf_from_class(env, tp##_classify(arg));       \
 191}
 192
 193COMPUTE_FPRF(float16)
 194COMPUTE_FPRF(float32)
 195COMPUTE_FPRF(float64)
 196COMPUTE_FPRF(float128)
 197
 198/* Floating-point invalid operations exception */
 199static void finish_invalid_op_excp(CPUPPCState *env, int op, uintptr_t retaddr)
 200{
 201    /* Update the floating-point invalid operation summary */
 202    env->fpscr |= FP_VX;
 203    /* Update the floating-point exception summary */
 204    env->fpscr |= FP_FX;
 205    if (fpscr_ve != 0) {
 206        /* Update the floating-point enabled exception summary */
 207        env->fpscr |= FP_FEX;
 208        if (fp_exceptions_enabled(env)) {
 209            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
 210                                   POWERPC_EXCP_FP | op, retaddr);
 211        }
 212    }
 213}
 214
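/*
 * Common tail for invalid arithmetic operations: FR/FI are cleared, and
 * when VE is clear the instruction completes (typically with softfloat's
 * default QNaN), so the FPRF class bits are set to the QNaN encoding
 * (C and FU) before the summary bits are updated.
 */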
 215static void finish_invalid_op_arith(CPUPPCState *env, int op,
 216                                    bool set_fpcc, uintptr_t retaddr)
 217{
 218    env->fpscr &= ~(FP_FR | FP_FI);
 219    if (fpscr_ve == 0) {
 220        if (set_fpcc) {
 221            env->fpscr &= ~FP_FPCC;
 222            env->fpscr |= (FP_C | FP_FU);
 223        }
 224    }
 225    finish_invalid_op_excp(env, op, retaddr);
 226}
 227
 228/* Signalling NaN */
 229static void float_invalid_op_vxsnan(CPUPPCState *env, uintptr_t retaddr)
 230{
 231    env->fpscr |= FP_VXSNAN;
 232    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, retaddr);
 233}
 234
 235/* Magnitude subtraction of infinities */
 236static void float_invalid_op_vxisi(CPUPPCState *env, bool set_fpcc,
 237                                   uintptr_t retaddr)
 238{
 239    env->fpscr |= FP_VXISI;
 240    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXISI, set_fpcc, retaddr);
 241}
 242
 243/* Division of infinity by infinity */
 244static void float_invalid_op_vxidi(CPUPPCState *env, bool set_fpcc,
 245                                   uintptr_t retaddr)
 246{
 247    env->fpscr |= FP_VXIDI;
 248    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIDI, set_fpcc, retaddr);
 249}
 250
 251/* Division of zero by zero */
 252static void float_invalid_op_vxzdz(CPUPPCState *env, bool set_fpcc,
 253                                   uintptr_t retaddr)
 254{
 255    env->fpscr |= FP_VXZDZ;
 256    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXZDZ, set_fpcc, retaddr);
 257}
 258
 259/* Multiplication of zero by infinity */
 260static void float_invalid_op_vximz(CPUPPCState *env, bool set_fpcc,
 261                                   uintptr_t retaddr)
 262{
 263    env->fpscr |= FP_VXIMZ;
 264    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIMZ, set_fpcc, retaddr);
 265}
 266
 267/* Square root of a negative number */
 268static void float_invalid_op_vxsqrt(CPUPPCState *env, bool set_fpcc,
 269                                    uintptr_t retaddr)
 270{
 271    env->fpscr |= FP_VXSQRT;
 272    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXSQRT, set_fpcc, retaddr);
 273}
 274
 275/* Ordered comparison of NaN */
 276static void float_invalid_op_vxvc(CPUPPCState *env, bool set_fpcc,
 277                                  uintptr_t retaddr)
 278{
 279    env->fpscr |= FP_VXVC;
 280    if (set_fpcc) {
 281        env->fpscr &= ~FP_FPCC;
 282        env->fpscr |= (FP_C | FP_FU);
 283    }
 284    /* Update the floating-point invalid operation summary */
 285    env->fpscr |= FP_VX;
 286    /* Update the floating-point exception summary */
 287    env->fpscr |= FP_FX;
 288    /* We must update the target FPR before raising the exception */
 289    if (fpscr_ve != 0) {
 290        CPUState *cs = env_cpu(env);
 291
 292        cs->exception_index = POWERPC_EXCP_PROGRAM;
 293        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
 294        /* Update the floating-point enabled exception summary */
 295        env->fpscr |= FP_FEX;
        /* Exception is deferred */
 297    }
 298}
 299
 300/* Invalid conversion */
 301static void float_invalid_op_vxcvi(CPUPPCState *env, bool set_fpcc,
 302                                   uintptr_t retaddr)
 303{
 304    env->fpscr |= FP_VXCVI;
 305    env->fpscr &= ~(FP_FR | FP_FI);
 306    if (fpscr_ve == 0) {
 307        if (set_fpcc) {
 308            env->fpscr &= ~FP_FPCC;
 309            env->fpscr |= (FP_C | FP_FU);
 310        }
 311    }
 312    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, retaddr);
 313}
 314
 315static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
 316{
 317    env->fpscr |= FP_ZX;
 318    env->fpscr &= ~(FP_FR | FP_FI);
 319    /* Update the floating-point exception summary */
 320    env->fpscr |= FP_FX;
 321    if (fpscr_ze != 0) {
 322        /* Update the floating-point enabled exception summary */
 323        env->fpscr |= FP_FEX;
 324        if (fp_exceptions_enabled(env)) {
 325            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
 326                                   POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
 327                                   raddr);
 328        }
 329    }
 330}
 331
 332static inline void float_overflow_excp(CPUPPCState *env)
 333{
 334    CPUState *cs = env_cpu(env);
 335
 336    env->fpscr |= FP_OX;
 337    /* Update the floating-point exception summary */
 338    env->fpscr |= FP_FX;
 339    if (fpscr_oe != 0) {
 340        /* XXX: should adjust the result */
 341        /* Update the floating-point enabled exception summary */
 342        env->fpscr |= FP_FEX;
 343        /* We must update the target FPR before raising the exception */
 344        cs->exception_index = POWERPC_EXCP_PROGRAM;
 345        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
 346    } else {
 347        env->fpscr |= FP_XX;
 348        env->fpscr |= FP_FI;
 349    }
 350}
 351
 352static inline void float_underflow_excp(CPUPPCState *env)
 353{
 354    CPUState *cs = env_cpu(env);
 355
 356    env->fpscr |= FP_UX;
 357    /* Update the floating-point exception summary */
 358    env->fpscr |= FP_FX;
 359    if (fpscr_ue != 0) {
 360        /* XXX: should adjust the result */
 361        /* Update the floating-point enabled exception summary */
 362        env->fpscr |= FP_FEX;
 363        /* We must update the target FPR before raising the exception */
 364        cs->exception_index = POWERPC_EXCP_PROGRAM;
 365        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
 366    }
 367}
 368
 369static inline void float_inexact_excp(CPUPPCState *env)
 370{
 371    CPUState *cs = env_cpu(env);
 372
 373    env->fpscr |= FP_FI;
 374    env->fpscr |= FP_XX;
 375    /* Update the floating-point exception summary */
 376    env->fpscr |= FP_FX;
 377    if (fpscr_xe != 0) {
 378        /* Update the floating-point enabled exception summary */
 379        env->fpscr |= FP_FEX;
 380        /* We must update the target FPR before raising the exception */
 381        cs->exception_index = POWERPC_EXCP_PROGRAM;
 382        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
 383    }
 384}
 385
 386static inline void fpscr_set_rounding_mode(CPUPPCState *env)
 387{
 388    int rnd_type;
 389
 390    /* Set rounding mode */
 391    switch (fpscr_rn) {
 392    case 0:
 393        /* Best approximation (round to nearest) */
 394        rnd_type = float_round_nearest_even;
 395        break;
 396    case 1:
 397        /* Smaller magnitude (round toward zero) */
 398        rnd_type = float_round_to_zero;
 399        break;
 400    case 2:
        /* Round toward +infinity */
 402        rnd_type = float_round_up;
 403        break;
 404    default:
 405    case 3:
        /* Round toward -infinity */
 407        rnd_type = float_round_down;
 408        break;
 409    }
 410    set_float_rounding_mode(rnd_type, &env->fp_status);
 411}
 412
 413void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
 414{
 415    int prev;
 416
 417    prev = (env->fpscr >> bit) & 1;
 418    env->fpscr &= ~(1 << bit);
 419    if (prev == 1) {
 420        switch (bit) {
 421        case FPSCR_RN1:
 422        case FPSCR_RN0:
 423            fpscr_set_rounding_mode(env);
 424            break;
 425        case FPSCR_VXSNAN:
 426        case FPSCR_VXISI:
 427        case FPSCR_VXIDI:
 428        case FPSCR_VXZDZ:
 429        case FPSCR_VXIMZ:
 430        case FPSCR_VXVC:
 431        case FPSCR_VXSOFT:
 432        case FPSCR_VXSQRT:
 433        case FPSCR_VXCVI:
 434            if (!fpscr_ix) {
 435                /* Set VX bit to zero */
 436                env->fpscr &= ~FP_VX;
 437            }
 438            break;
 439        case FPSCR_OX:
 440        case FPSCR_UX:
 441        case FPSCR_ZX:
 442        case FPSCR_XX:
 443        case FPSCR_VE:
 444        case FPSCR_OE:
 445        case FPSCR_UE:
 446        case FPSCR_ZE:
 447        case FPSCR_XE:
 448            if (!fpscr_eex) {
                /* Clear the FEX bit */
 450                env->fpscr &= ~FP_FEX;
 451            }
 452            break;
 453        default:
 454            break;
 455        }
 456    }
 457}
 458
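/*
 * Note that setting an exception bit with its enable active only records
 * the pending program interrupt in cs->exception_index / env->error_code;
 * the exception itself is expected to be delivered later, from
 * helper_float_check_status(), after the rest of the instruction's
 * effects (e.g. the CR1 update for Rc=1 forms) have been committed.
 */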
 459void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
 460{
 461    CPUState *cs = env_cpu(env);
 462    int prev;
 463
 464    prev = (env->fpscr >> bit) & 1;
 465    env->fpscr |= 1 << bit;
 466    if (prev == 0) {
 467        switch (bit) {
 468        case FPSCR_VX:
 469            env->fpscr |= FP_FX;
 470            if (fpscr_ve) {
 471                goto raise_ve;
 472            }
 473            break;
 474        case FPSCR_OX:
 475            env->fpscr |= FP_FX;
 476            if (fpscr_oe) {
 477                goto raise_oe;
 478            }
 479            break;
 480        case FPSCR_UX:
 481            env->fpscr |= FP_FX;
 482            if (fpscr_ue) {
 483                goto raise_ue;
 484            }
 485            break;
 486        case FPSCR_ZX:
 487            env->fpscr |= FP_FX;
 488            if (fpscr_ze) {
 489                goto raise_ze;
 490            }
 491            break;
 492        case FPSCR_XX:
 493            env->fpscr |= FP_FX;
 494            if (fpscr_xe) {
 495                goto raise_xe;
 496            }
 497            break;
 498        case FPSCR_VXSNAN:
 499        case FPSCR_VXISI:
 500        case FPSCR_VXIDI:
 501        case FPSCR_VXZDZ:
 502        case FPSCR_VXIMZ:
 503        case FPSCR_VXVC:
 504        case FPSCR_VXSOFT:
 505        case FPSCR_VXSQRT:
 506        case FPSCR_VXCVI:
 507            env->fpscr |= FP_VX;
 508            env->fpscr |= FP_FX;
 509            if (fpscr_ve != 0) {
 510                goto raise_ve;
 511            }
 512            break;
 513        case FPSCR_VE:
 514            if (fpscr_vx != 0) {
 515            raise_ve:
 516                env->error_code = POWERPC_EXCP_FP;
 517                if (fpscr_vxsnan) {
 518                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
 519                }
 520                if (fpscr_vxisi) {
 521                    env->error_code |= POWERPC_EXCP_FP_VXISI;
 522                }
 523                if (fpscr_vxidi) {
 524                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
 525                }
 526                if (fpscr_vxzdz) {
 527                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
 528                }
 529                if (fpscr_vximz) {
 530                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
 531                }
 532                if (fpscr_vxvc) {
 533                    env->error_code |= POWERPC_EXCP_FP_VXVC;
 534                }
 535                if (fpscr_vxsoft) {
 536                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
 537                }
 538                if (fpscr_vxsqrt) {
 539                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
 540                }
 541                if (fpscr_vxcvi) {
 542                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
 543                }
 544                goto raise_excp;
 545            }
 546            break;
 547        case FPSCR_OE:
 548            if (fpscr_ox != 0) {
 549            raise_oe:
 550                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
 551                goto raise_excp;
 552            }
 553            break;
 554        case FPSCR_UE:
 555            if (fpscr_ux != 0) {
 556            raise_ue:
 557                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
 558                goto raise_excp;
 559            }
 560            break;
 561        case FPSCR_ZE:
 562            if (fpscr_zx != 0) {
 563            raise_ze:
 564                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
 565                goto raise_excp;
 566            }
 567            break;
 568        case FPSCR_XE:
 569            if (fpscr_xx != 0) {
 570            raise_xe:
 571                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
 572                goto raise_excp;
 573            }
 574            break;
 575        case FPSCR_RN1:
 576        case FPSCR_RN0:
 577            fpscr_set_rounding_mode(env);
 578            break;
 579        default:
 580            break;
 581        raise_excp:
 582            /* Update the floating-point enabled exception summary */
 583            env->fpscr |= FP_FEX;
 584            /* We have to update Rc1 before raising the exception */
 585            cs->exception_index = POWERPC_EXCP_PROGRAM;
 586            break;
 587        }
 588    }
 589}
 590
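/*
 * 'mask' has one bit per 4-bit FPSCR field (cf. the FM operand of
 * mtfsf); FEX and VX are never copied from the source value but are
 * recomputed below from the individual exception and enable bits.
 */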
 591void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
 592{
 593    CPUState *cs = env_cpu(env);
 594    target_ulong prev, new;
 595    int i;
 596
 597    prev = env->fpscr;
 598    new = (target_ulong)arg;
 599    new &= ~(FP_FEX | FP_VX);
 600    new |= prev & (FP_FEX | FP_VX);
 601    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
 602        if (mask & (1 << i)) {
 603            env->fpscr &= ~(0xFLL << (4 * i));
 604            env->fpscr |= new & (0xFLL << (4 * i));
 605        }
 606    }
 607    /* Update VX and FEX */
 608    if (fpscr_ix != 0) {
 609        env->fpscr |= FP_VX;
 610    } else {
 611        env->fpscr &= ~FP_VX;
 612    }
 613    if ((fpscr_ex & fpscr_eex) != 0) {
 614        env->fpscr |= FP_FEX;
 615        cs->exception_index = POWERPC_EXCP_PROGRAM;
 616        /* XXX: we should compute it properly */
 617        env->error_code = POWERPC_EXCP_FP;
 618    } else {
 619        env->fpscr &= ~FP_FEX;
 620    }
 621    fpscr_set_rounding_mode(env);
 622}
 623
 624void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
 625{
 626    helper_store_fpscr(env, arg, mask);
 627}
 628
 629static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
 630{
 631    CPUState *cs = env_cpu(env);
 632    int status = get_float_exception_flags(&env->fp_status);
 633
 634    if (status & float_flag_overflow) {
 635        float_overflow_excp(env);
 636    } else if (status & float_flag_underflow) {
 637        float_underflow_excp(env);
 638    }
 639    if (status & float_flag_inexact) {
 640        float_inexact_excp(env);
 641    } else {
 642        env->fpscr &= ~FP_FI; /* clear the FPSCR[FI] bit */
 643    }
 644
 645    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
 646        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
 648        if (fp_exceptions_enabled(env)) {
 649            raise_exception_err_ra(env, cs->exception_index,
 650                                   env->error_code, raddr);
 651        }
 652    }
 653}
 654
 655void helper_float_check_status(CPUPPCState *env)
 656{
 657    do_float_check_status(env, GETPC());
 658}
 659
 660void helper_reset_fpstatus(CPUPPCState *env)
 661{
 662    set_float_exception_flags(0, &env->fp_status);
 663}
 664
 665static void float_invalid_op_addsub(CPUPPCState *env, bool set_fpcc,
 666                                    uintptr_t retaddr, int classes)
 667{
 668    if ((classes & ~is_neg) == is_inf) {
 669        /* Magnitude subtraction of infinities */
 670        float_invalid_op_vxisi(env, set_fpcc, retaddr);
 671    } else if (classes & is_snan) {
 672        float_invalid_op_vxsnan(env, retaddr);
 673    }
 674}
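
/*
 * Because the class bits are one-hot, OR-ing the two operand classes and
 * dropping is_neg leaves exactly is_inf only when both operands are
 * infinities; if softfloat flagged the operation invalid, that is the
 * magnitude-subtraction-of-infinities (VXISI) case.
 */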
 675
 676/* fadd - fadd. */
 677float64 helper_fadd(CPUPPCState *env, float64 arg1, float64 arg2)
 678{
 679    float64 ret = float64_add(arg1, arg2, &env->fp_status);
 680    int status = get_float_exception_flags(&env->fp_status);
 681
 682    if (unlikely(status & float_flag_invalid)) {
 683        float_invalid_op_addsub(env, 1, GETPC(),
 684                                float64_classify(arg1) |
 685                                float64_classify(arg2));
 686    }
 687
 688    return ret;
 689}
 690
 691/* fsub - fsub. */
 692float64 helper_fsub(CPUPPCState *env, float64 arg1, float64 arg2)
 693{
 694    float64 ret = float64_sub(arg1, arg2, &env->fp_status);
 695    int status = get_float_exception_flags(&env->fp_status);
 696
 697    if (unlikely(status & float_flag_invalid)) {
 698        float_invalid_op_addsub(env, 1, GETPC(),
 699                                float64_classify(arg1) |
 700                                float64_classify(arg2));
 701    }
 702
 703    return ret;
 704}
 705
 706static void float_invalid_op_mul(CPUPPCState *env, bool set_fprc,
 707                                 uintptr_t retaddr, int classes)
 708{
 709    if ((classes & (is_zero | is_inf)) == (is_zero | is_inf)) {
 710        /* Multiplication of zero by infinity */
 711        float_invalid_op_vximz(env, set_fprc, retaddr);
 712    } else if (classes & is_snan) {
 713        float_invalid_op_vxsnan(env, retaddr);
 714    }
 715}
 716
 717/* fmul - fmul. */
 718float64 helper_fmul(CPUPPCState *env, float64 arg1, float64 arg2)
 719{
 720    float64 ret = float64_mul(arg1, arg2, &env->fp_status);
 721    int status = get_float_exception_flags(&env->fp_status);
 722
 723    if (unlikely(status & float_flag_invalid)) {
 724        float_invalid_op_mul(env, 1, GETPC(),
 725                             float64_classify(arg1) |
 726                             float64_classify(arg2));
 727    }
 728
 729    return ret;
 730}
 731
 732static void float_invalid_op_div(CPUPPCState *env, bool set_fprc,
 733                                 uintptr_t retaddr, int classes)
 734{
 735    classes &= ~is_neg;
 736    if (classes == is_inf) {
 737        /* Division of infinity by infinity */
 738        float_invalid_op_vxidi(env, set_fprc, retaddr);
 739    } else if (classes == is_zero) {
 740        /* Division of zero by zero */
 741        float_invalid_op_vxzdz(env, set_fprc, retaddr);
 742    } else if (classes & is_snan) {
 743        float_invalid_op_vxsnan(env, retaddr);
 744    }
 745}
 746
 747/* fdiv - fdiv. */
 748float64 helper_fdiv(CPUPPCState *env, float64 arg1, float64 arg2)
 749{
 750    float64 ret = float64_div(arg1, arg2, &env->fp_status);
 751    int status = get_float_exception_flags(&env->fp_status);
 752
 753    if (unlikely(status)) {
 754        if (status & float_flag_invalid) {
 755            float_invalid_op_div(env, 1, GETPC(),
 756                                 float64_classify(arg1) |
 757                                 float64_classify(arg2));
 758        }
 759        if (status & float_flag_divbyzero) {
 760            float_zero_divide_excp(env, GETPC());
 761        }
 762    }
 763
 764    return ret;
 765}
 766
 767static void float_invalid_cvt(CPUPPCState *env, bool set_fprc,
 768                              uintptr_t retaddr, int class1)
 769{
 770    float_invalid_op_vxcvi(env, set_fprc, retaddr);
 771    if (class1 & is_snan) {
 772        float_invalid_op_vxsnan(env, retaddr);
 773    }
 774}
 775
 776#define FPU_FCTI(op, cvt, nanval)                                      \
 777uint64_t helper_##op(CPUPPCState *env, float64 arg)                    \
 778{                                                                      \
 779    uint64_t ret = float64_to_##cvt(arg, &env->fp_status);             \
 780    int status = get_float_exception_flags(&env->fp_status);           \
 781                                                                       \
 782    if (unlikely(status)) {                                            \
        if (status & float_flag_invalid) {                             \
            float_invalid_cvt(env, 1, GETPC(), float64_classify(arg)); \
            if (float64_is_any_nan(arg)) {                             \
                /* Only a NaN source overrides the saturated result */ \
                ret = nanval;                                          \
            }                                                          \
        }                                                              \
 787        do_float_check_status(env, GETPC());                           \
 788    }                                                                  \
 789    return ret;                                                        \
 790}
 791
 792FPU_FCTI(fctiw, int32, 0x80000000U)
 793FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
 794FPU_FCTI(fctiwu, uint32, 0x00000000U)
 795FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
 796FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
 797FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
 798FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
 799FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
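
/*
 * In the converts above, nanval is the value substituted when the
 * conversion is invalid because the source is a NaN: the most-negative
 * integer for the signed forms and zero for the unsigned forms.
 */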
 800
 801#define FPU_FCFI(op, cvtr, is_single)                      \
 802uint64_t helper_##op(CPUPPCState *env, uint64_t arg)       \
 803{                                                          \
 804    CPU_DoubleU farg;                                      \
 805                                                           \
 806    if (is_single) {                                       \
 807        float32 tmp = cvtr(arg, &env->fp_status);          \
 808        farg.d = float32_to_float64(tmp, &env->fp_status); \
 809    } else {                                               \
 810        farg.d = cvtr(arg, &env->fp_status);               \
 811    }                                                      \
 812    do_float_check_status(env, GETPC());                   \
 813    return farg.ll;                                        \
 814}
 815
 816FPU_FCFI(fcfid, int64_to_float64, 0)
 817FPU_FCFI(fcfids, int64_to_float32, 1)
 818FPU_FCFI(fcfidu, uint64_to_float64, 0)
 819FPU_FCFI(fcfidus, uint64_to_float32, 1)
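
/*
 * The is_single forms above round through float32 first and then widen
 * the result back to float64, since single-precision values are kept in
 * double format in the FPRs (compare helper_frsp below).
 */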
 820
 821static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
 822                              int rounding_mode)
 823{
 824    CPU_DoubleU farg;
 825
 826    farg.ll = arg;
 827
 828    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
 829        /* sNaN round */
 830        float_invalid_op_vxsnan(env, GETPC());
 831        farg.ll = arg | 0x0008000000000000ULL;
 832    } else {
 833        int inexact = get_float_exception_flags(&env->fp_status) &
 834                      float_flag_inexact;
 835        set_float_rounding_mode(rounding_mode, &env->fp_status);
 836        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
 837        /* Restore rounding mode from FPSCR */
 838        fpscr_set_rounding_mode(env);
 839
 840        /* fri* does not set FPSCR[XX] */
 841        if (!inexact) {
 842            env->fp_status.float_exception_flags &= ~float_flag_inexact;
 843        }
 844    }
 845    do_float_check_status(env, GETPC());
 846    return farg.ll;
 847}
 848
 849uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
 850{
 851    return do_fri(env, arg, float_round_ties_away);
 852}
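
/*
 * float_round_ties_away rounds halfway cases away from zero, e.g.
 * frin(0.5) = 1.0 and frin(-0.5) = -1.0, unlike the round-to-nearest-even
 * mode selected by FPSCR[RN] = 0.
 */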
 853
 854uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
 855{
 856    return do_fri(env, arg, float_round_to_zero);
 857}
 858
 859uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
 860{
 861    return do_fri(env, arg, float_round_up);
 862}
 863
 864uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
 865{
 866    return do_fri(env, arg, float_round_down);
 867}
 868
 869#define FPU_MADDSUB_UPDATE(NAME, TP)                                    \
 870static void NAME(CPUPPCState *env, TP arg1, TP arg2, TP arg3,           \
 871                 unsigned int madd_flags, uintptr_t retaddr)            \
 872{                                                                       \
 873    if (TP##_is_signaling_nan(arg1, &env->fp_status) ||                 \
 874        TP##_is_signaling_nan(arg2, &env->fp_status) ||                 \
 875        TP##_is_signaling_nan(arg3, &env->fp_status)) {                 \
 876        /* sNaN operation */                                            \
 877        float_invalid_op_vxsnan(env, retaddr);                          \
 878    }                                                                   \
 879    if ((TP##_is_infinity(arg1) && TP##_is_zero(arg2)) ||               \
 880        (TP##_is_zero(arg1) && TP##_is_infinity(arg2))) {               \
 881        /* Multiplication of zero by infinity */                        \
 882        float_invalid_op_vximz(env, 1, retaddr);                        \
 883    }                                                                   \
 884    if ((TP##_is_infinity(arg1) || TP##_is_infinity(arg2)) &&           \
 885        TP##_is_infinity(arg3)) {                                       \
 886        uint8_t aSign, bSign, cSign;                                    \
 887                                                                        \
 888        aSign = TP##_is_neg(arg1);                                      \
 889        bSign = TP##_is_neg(arg2);                                      \
 890        cSign = TP##_is_neg(arg3);                                      \
 891        if (madd_flags & float_muladd_negate_c) {                       \
 892            cSign ^= 1;                                                 \
 893        }                                                               \
 894        if (aSign ^ bSign ^ cSign) {                                    \
 895            float_invalid_op_vxisi(env, 1, retaddr);                    \
 896        }                                                               \
 897    }                                                                   \
 898}
 899FPU_MADDSUB_UPDATE(float32_maddsub_update_excp, float32)
 900FPU_MADDSUB_UPDATE(float64_maddsub_update_excp, float64)
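
/*
 * Only float_muladd_negate_c matters for the sign check above: negating
 * the final result does not change whether a*b and c are infinities of
 * effectively opposite sign, so VXISI is detected the same way for the
 * fnmadd/fnmsub forms.
 */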
 901
 902#define FPU_FMADD(op, madd_flags)                                       \
 903uint64_t helper_##op(CPUPPCState *env, uint64_t arg1,                   \
 904                     uint64_t arg2, uint64_t arg3)                      \
 905{                                                                       \
 906    uint32_t flags;                                                     \
 907    float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags,          \
 908                                 &env->fp_status);                      \
 909    flags = get_float_exception_flags(&env->fp_status);                 \
 910    if (flags) {                                                        \
 911        if (flags & float_flag_invalid) {                               \
 912            float64_maddsub_update_excp(env, arg1, arg2, arg3,          \
 913                                        madd_flags, GETPC());           \
 914        }                                                               \
 915        do_float_check_status(env, GETPC());                            \
 916    }                                                                   \
 917    return ret;                                                         \
 918}
 919
 920#define MADD_FLGS 0
 921#define MSUB_FLGS float_muladd_negate_c
 922#define NMADD_FLGS float_muladd_negate_result
 923#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)
 924
 925FPU_FMADD(fmadd, MADD_FLGS)
 926FPU_FMADD(fnmadd, NMADD_FLGS)
 927FPU_FMADD(fmsub, MSUB_FLGS)
 928FPU_FMADD(fnmsub, NMSUB_FLGS)
 929
 930/* frsp - frsp. */
 931uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
 932{
 933    CPU_DoubleU farg;
 934    float32 f32;
 935
 936    farg.ll = arg;
 937
 938    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
 939        float_invalid_op_vxsnan(env, GETPC());
 940    }
 941    f32 = float64_to_float32(farg.d, &env->fp_status);
 942    farg.d = float32_to_float64(f32, &env->fp_status);
 943
 944    return farg.ll;
 945}
 946
 947/* fsqrt - fsqrt. */
 948float64 helper_fsqrt(CPUPPCState *env, float64 arg)
 949{
 950    float64 ret = float64_sqrt(arg, &env->fp_status);
 951    int status = get_float_exception_flags(&env->fp_status);
 952
 953    if (unlikely(status & float_flag_invalid)) {
 954        if (unlikely(float64_is_any_nan(arg))) {
 955            if (unlikely(float64_is_signaling_nan(arg, &env->fp_status))) {
 956                /* sNaN square root */
 957                float_invalid_op_vxsnan(env, GETPC());
 958            }
 959        } else {
 960            /* Square root of a negative nonzero number */
 961            float_invalid_op_vxsqrt(env, 1, GETPC());
 962        }
 963    }
 964
 965    return ret;
 966}
 967
 968/* fre - fre. */
 969float64 helper_fre(CPUPPCState *env, float64 arg)
 970{
 971    /* "Estimate" the reciprocal with actual division.  */
 972    float64 ret = float64_div(float64_one, arg, &env->fp_status);
 973    int status = get_float_exception_flags(&env->fp_status);
 974
 975    if (unlikely(status)) {
 976        if (status & float_flag_invalid) {
 977            if (float64_is_signaling_nan(arg, &env->fp_status)) {
 978                /* sNaN reciprocal */
 979                float_invalid_op_vxsnan(env, GETPC());
 980            }
 981        }
 982        if (status & float_flag_divbyzero) {
 983            float_zero_divide_excp(env, GETPC());
 984            /* For FPSCR.ZE == 0, the result is 1/2.  */
 985            ret = float64_set_sign(float64_half, float64_is_neg(arg));
 986        }
 987    }
 988
 989    return ret;
 990}
 991
 992/* fres - fres. */
 993uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
 994{
 995    CPU_DoubleU farg;
 996    float32 f32;
 997
 998    farg.ll = arg;
 999
1000    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
1001        /* sNaN reciprocal */
1002        float_invalid_op_vxsnan(env, GETPC());
1003    }
1004    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
1005    f32 = float64_to_float32(farg.d, &env->fp_status);
1006    farg.d = float32_to_float64(f32, &env->fp_status);
1007
1008    return farg.ll;
1009}
1010
1011/* frsqrte  - frsqrte. */
1012float64 helper_frsqrte(CPUPPCState *env, float64 arg)
1013{
    /* "Estimate" the reciprocal square root with a real sqrt and division.  */
1015    float64 rets = float64_sqrt(arg, &env->fp_status);
1016    float64 retd = float64_div(float64_one, rets, &env->fp_status);
1017    int status = get_float_exception_flags(&env->fp_status);
1018
1019    if (unlikely(status)) {
1020        if (status & float_flag_invalid) {
1021            if (float64_is_signaling_nan(arg, &env->fp_status)) {
1022                /* sNaN reciprocal */
1023                float_invalid_op_vxsnan(env, GETPC());
1024            } else {
1025                /* Square root of a negative nonzero number */
1026                float_invalid_op_vxsqrt(env, 1, GETPC());
1027            }
1028        }
1029        if (status & float_flag_divbyzero) {
1030            /* Reciprocal of (square root of) zero.  */
1031            float_zero_divide_excp(env, GETPC());
1032        }
1033    }
1034
1035    return retd;
1036}
1037
1038/* fsel - fsel. */
1039uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
1040                     uint64_t arg3)
1041{
1042    CPU_DoubleU farg1;
1043
1044    farg1.ll = arg1;
1045
1046    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
1047        !float64_is_any_nan(farg1.d)) {
1048        return arg2;
1049    } else {
1050        return arg3;
1051    }
1052}
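
/*
 * fsel returns arg2 when arg1 is greater than or equal to zero (either
 * sign of zero qualifies) and arg3 otherwise, including when arg1 is a
 * NaN; it raises no exceptions and sets no status bits.
 */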
1053
1054uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
1055{
1056    int fe_flag = 0;
1057    int fg_flag = 0;
1058
1059    if (unlikely(float64_is_infinity(fra) ||
1060                 float64_is_infinity(frb) ||
1061                 float64_is_zero(frb))) {
1062        fe_flag = 1;
1063        fg_flag = 1;
1064    } else {
1065        int e_a = ppc_float64_get_unbiased_exp(fra);
1066        int e_b = ppc_float64_get_unbiased_exp(frb);
1067
1068        if (unlikely(float64_is_any_nan(fra) ||
1069                     float64_is_any_nan(frb))) {
1070            fe_flag = 1;
1071        } else if ((e_b <= -1022) || (e_b >= 1021)) {
1072            fe_flag = 1;
1073        } else if (!float64_is_zero(fra) &&
1074                   (((e_a - e_b) >= 1023) ||
1075                    ((e_a - e_b) <= -1021) ||
1076                    (e_a <= -970))) {
1077            fe_flag = 1;
1078        }
1079
1080        if (unlikely(float64_is_zero_or_denormal(frb))) {
1081            /* XB is not zero because of the above check and */
1082            /* so must be denormalized.                      */
1083            fg_flag = 1;
1084        }
1085    }
1086
1087    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
1088}
1089
1090uint32_t helper_ftsqrt(uint64_t frb)
1091{
1092    int fe_flag = 0;
1093    int fg_flag = 0;
1094
1095    if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
1096        fe_flag = 1;
1097        fg_flag = 1;
1098    } else {
1099        int e_b = ppc_float64_get_unbiased_exp(frb);
1100
1101        if (unlikely(float64_is_any_nan(frb))) {
1102            fe_flag = 1;
1103        } else if (unlikely(float64_is_zero(frb))) {
1104            fe_flag = 1;
1105        } else if (unlikely(float64_is_neg(frb))) {
1106            fe_flag = 1;
1107        } else if (!float64_is_zero(frb) && (e_b <= (-1022 + 52))) {
1108            fe_flag = 1;
1109        }
1110
1111        if (unlikely(float64_is_zero_or_denormal(frb))) {
1112            /* XB is not zero because of the above check and */
1113            /* therefore must be denormalized.               */
1114            fg_flag = 1;
1115        }
1116    }
1117
1118    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
1119}
1120
1121void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
1122                  uint32_t crfD)
1123{
1124    CPU_DoubleU farg1, farg2;
1125    uint32_t ret = 0;
1126
1127    farg1.ll = arg1;
1128    farg2.ll = arg2;
1129
1130    if (unlikely(float64_is_any_nan(farg1.d) ||
1131                 float64_is_any_nan(farg2.d))) {
1132        ret = 0x01UL;
1133    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1134        ret = 0x08UL;
1135    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1136        ret = 0x04UL;
1137    } else {
1138        ret = 0x02UL;
1139    }
1140
1141    env->fpscr &= ~FP_FPCC;
1142    env->fpscr |= ret << FPSCR_FPCC;
1143    env->crf[crfD] = ret;
1144    if (unlikely(ret == 0x01UL
1145                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
1146                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
1147        /* sNaN comparison */
1148        float_invalid_op_vxsnan(env, GETPC());
1149    }
1150}
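
/*
 * In the 4-bit comparison result, 0x8/0x4/0x2/0x1 are the FL (less
 * than), FG (greater than), FE (equal) and FU (unordered) bits that go
 * both into FPCC and into the target CR field.  fcmpu only reports
 * VXSNAN for signaling NaNs; fcmpo below additionally raises VXVC for
 * any NaN operand.
 */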
1151
1152void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
1153                  uint32_t crfD)
1154{
1155    CPU_DoubleU farg1, farg2;
1156    uint32_t ret = 0;
1157
1158    farg1.ll = arg1;
1159    farg2.ll = arg2;
1160
1161    if (unlikely(float64_is_any_nan(farg1.d) ||
1162                 float64_is_any_nan(farg2.d))) {
1163        ret = 0x01UL;
1164    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1165        ret = 0x08UL;
1166    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1167        ret = 0x04UL;
1168    } else {
1169        ret = 0x02UL;
1170    }
1171
1172    env->fpscr &= ~FP_FPCC;
1173    env->fpscr |= ret << FPSCR_FPCC;
1174    env->crf[crfD] = (uint32_t) ret;
1175    if (unlikely(ret == 0x01UL)) {
1176        float_invalid_op_vxvc(env, 1, GETPC());
1177        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
1178            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
1179            /* sNaN comparison */
1180            float_invalid_op_vxsnan(env, GETPC());
1181        }
1182    }
1183}
1184
1185/* Single-precision floating-point conversions */
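/*
 * The SPE helpers below operate on env->vec_status, a softfloat context
 * separate from env->fp_status, so they do not disturb the FPSCR-related
 * state maintained by the helpers above.
 */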
1186static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
1187{
1188    CPU_FloatU u;
1189
1190    u.f = int32_to_float32(val, &env->vec_status);
1191
1192    return u.l;
1193}
1194
1195static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
1196{
1197    CPU_FloatU u;
1198
1199    u.f = uint32_to_float32(val, &env->vec_status);
1200
1201    return u.l;
1202}
1203
1204static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
1205{
1206    CPU_FloatU u;
1207
1208    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
1210    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1211        return 0;
1212    }
1213
1214    return float32_to_int32(u.f, &env->vec_status);
1215}
1216
1217static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
1218{
1219    CPU_FloatU u;
1220
1221    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
1223    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1224        return 0;
1225    }
1226
1227    return float32_to_uint32(u.f, &env->vec_status);
1228}
1229
1230static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
1231{
1232    CPU_FloatU u;
1233
1234    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
1236    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1237        return 0;
1238    }
1239
1240    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
1241}
1242
1243static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
1244{
1245    CPU_FloatU u;
1246
1247    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
1249    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1250        return 0;
1251    }
1252
1253    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
1254}
1255
1256static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
1257{
1258    CPU_FloatU u;
1259    float32 tmp;
1260
1261    u.f = int32_to_float32(val, &env->vec_status);
1262    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
1263    u.f = float32_div(u.f, tmp, &env->vec_status);
1264
1265    return u.l;
1266}
1267
1268static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
1269{
1270    CPU_FloatU u;
1271    float32 tmp;
1272
1273    u.f = uint32_to_float32(val, &env->vec_status);
1274    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
1275    u.f = float32_div(u.f, tmp, &env->vec_status);
1276
1277    return u.l;
1278}
1279
1280static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
1281{
1282    CPU_FloatU u;
1283    float32 tmp;
1284
1285    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
1287    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1288        return 0;
1289    }
1290    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
1291    u.f = float32_mul(u.f, tmp, &env->vec_status);
1292
1293    return float32_to_int32(u.f, &env->vec_status);
1294}
1295
1296static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
1297{
1298    CPU_FloatU u;
1299    float32 tmp;
1300
1301    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
1303    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1304        return 0;
1305    }
1306    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
1307    u.f = float32_mul(u.f, tmp, &env->vec_status);
1308
1309    return float32_to_uint32(u.f, &env->vec_status);
1310}
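
/*
 * The *cfsf/*cfuf and *ctsf/*ctuf helpers above treat the 32-bit operand
 * as a signed/unsigned fixed-point fraction: conversion from a fraction
 * divides the integer-converted value by 2^32, and conversion to a
 * fraction multiplies by 2^32 before the final integer conversion.
 */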
1311
1312#define HELPER_SPE_SINGLE_CONV(name)                              \
1313    uint32_t helper_e##name(CPUPPCState *env, uint32_t val)       \
1314    {                                                             \
1315        return e##name(env, val);                                 \
1316    }
1317/* efscfsi */
1318HELPER_SPE_SINGLE_CONV(fscfsi);
1319/* efscfui */
1320HELPER_SPE_SINGLE_CONV(fscfui);
1321/* efscfuf */
1322HELPER_SPE_SINGLE_CONV(fscfuf);
1323/* efscfsf */
1324HELPER_SPE_SINGLE_CONV(fscfsf);
1325/* efsctsi */
1326HELPER_SPE_SINGLE_CONV(fsctsi);
1327/* efsctui */
1328HELPER_SPE_SINGLE_CONV(fsctui);
1329/* efsctsiz */
1330HELPER_SPE_SINGLE_CONV(fsctsiz);
1331/* efsctuiz */
1332HELPER_SPE_SINGLE_CONV(fsctuiz);
1333/* efsctsf */
1334HELPER_SPE_SINGLE_CONV(fsctsf);
1335/* efsctuf */
1336HELPER_SPE_SINGLE_CONV(fsctuf);
1337
1338#define HELPER_SPE_VECTOR_CONV(name)                            \
1339    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val)    \
1340    {                                                           \
1341        return ((uint64_t)e##name(env, val >> 32) << 32) |      \
1342            (uint64_t)e##name(env, val);                        \
1343    }
1344/* evfscfsi */
1345HELPER_SPE_VECTOR_CONV(fscfsi);
1346/* evfscfui */
1347HELPER_SPE_VECTOR_CONV(fscfui);
1348/* evfscfuf */
1349HELPER_SPE_VECTOR_CONV(fscfuf);
1350/* evfscfsf */
1351HELPER_SPE_VECTOR_CONV(fscfsf);
1352/* evfsctsi */
1353HELPER_SPE_VECTOR_CONV(fsctsi);
1354/* evfsctui */
1355HELPER_SPE_VECTOR_CONV(fsctui);
1356/* evfsctsiz */
1357HELPER_SPE_VECTOR_CONV(fsctsiz);
1358/* evfsctuiz */
1359HELPER_SPE_VECTOR_CONV(fsctuiz);
1360/* evfsctsf */
1361HELPER_SPE_VECTOR_CONV(fsctsf);
1362/* evfsctuf */
1363HELPER_SPE_VECTOR_CONV(fsctuf);
1364
1365/* Single-precision floating-point arithmetic */
1366static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
1367{
1368    CPU_FloatU u1, u2;
1369
1370    u1.l = op1;
1371    u2.l = op2;
1372    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
1373    return u1.l;
1374}
1375
1376static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
1377{
1378    CPU_FloatU u1, u2;
1379
1380    u1.l = op1;
1381    u2.l = op2;
1382    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
1383    return u1.l;
1384}
1385
1386static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
1387{
1388    CPU_FloatU u1, u2;
1389
1390    u1.l = op1;
1391    u2.l = op2;
1392    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
1393    return u1.l;
1394}
1395
1396static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
1397{
1398    CPU_FloatU u1, u2;
1399
1400    u1.l = op1;
1401    u2.l = op2;
1402    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
1403    return u1.l;
1404}
1405
1406#define HELPER_SPE_SINGLE_ARITH(name)                                   \
1407    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
1408    {                                                                   \
1409        return e##name(env, op1, op2);                                  \
1410    }
1411/* efsadd */
1412HELPER_SPE_SINGLE_ARITH(fsadd);
1413/* efssub */
1414HELPER_SPE_SINGLE_ARITH(fssub);
1415/* efsmul */
1416HELPER_SPE_SINGLE_ARITH(fsmul);
1417/* efsdiv */
1418HELPER_SPE_SINGLE_ARITH(fsdiv);
1419
1420#define HELPER_SPE_VECTOR_ARITH(name)                                   \
1421    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
1422    {                                                                   \
1423        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |   \
1424            (uint64_t)e##name(env, op1, op2);                           \
1425    }
1426/* evfsadd */
1427HELPER_SPE_VECTOR_ARITH(fsadd);
1428/* evfssub */
1429HELPER_SPE_VECTOR_ARITH(fssub);
1430/* evfsmul */
1431HELPER_SPE_VECTOR_ARITH(fsmul);
1432/* evfsdiv */
1433HELPER_SPE_VECTOR_ARITH(fsdiv);
1434
1435/* Single-precision floating-point comparisons */
1436static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1437{
1438    CPU_FloatU u1, u2;
1439
1440    u1.l = op1;
1441    u2.l = op2;
1442    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
1443}
1444
1445static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1446{
1447    CPU_FloatU u1, u2;
1448
1449    u1.l = op1;
1450    u2.l = op2;
1451    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
1452}
1453
1454static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
1455{
1456    CPU_FloatU u1, u2;
1457
1458    u1.l = op1;
1459    u2.l = op2;
1460    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
1461}
1462
1463static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1464{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
1466    return efscmplt(env, op1, op2);
1467}
1468
1469static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1470{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
1472    return efscmpgt(env, op1, op2);
1473}
1474
1475static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
1476{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
1478    return efscmpeq(env, op1, op2);
1479}
1480
1481#define HELPER_SINGLE_SPE_CMP(name)                                     \
1482    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
1483    {                                                                   \
1484        return e##name(env, op1, op2);                                  \
1485    }
1486/* efststlt */
1487HELPER_SINGLE_SPE_CMP(fststlt);
1488/* efststgt */
1489HELPER_SINGLE_SPE_CMP(fststgt);
1490/* efststeq */
1491HELPER_SINGLE_SPE_CMP(fststeq);
1492/* efscmplt */
1493HELPER_SINGLE_SPE_CMP(fscmplt);
1494/* efscmpgt */
1495HELPER_SINGLE_SPE_CMP(fscmpgt);
1496/* efscmpeq */
1497HELPER_SINGLE_SPE_CMP(fscmpeq);
1498
1499static inline uint32_t evcmp_merge(int t0, int t1)
1500{
1501    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
1502}
1503
1504#define HELPER_VECTOR_SPE_CMP(name)                                     \
1505    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
1506    {                                                                   \
1507        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),          \
1508                           e##name(env, op1, op2));                     \
1509    }
1510/* evfststlt */
1511HELPER_VECTOR_SPE_CMP(fststlt);
1512/* evfststgt */
1513HELPER_VECTOR_SPE_CMP(fststgt);
1514/* evfststeq */
1515HELPER_VECTOR_SPE_CMP(fststeq);
1516/* evfscmplt */
1517HELPER_VECTOR_SPE_CMP(fscmplt);
1518/* evfscmpgt */
1519HELPER_VECTOR_SPE_CMP(fscmpgt);
1520/* evfscmpeq */
1521HELPER_VECTOR_SPE_CMP(fscmpeq);
1522
1523/* Double-precision floating-point conversion */
1524uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
1525{
1526    CPU_DoubleU u;
1527
1528    u.d = int32_to_float64(val, &env->vec_status);
1529
1530    return u.ll;
1531}
1532
1533uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
1534{
1535    CPU_DoubleU u;
1536
1537    u.d = int64_to_float64(val, &env->vec_status);
1538
1539    return u.ll;
1540}
1541
1542uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
1543{
1544    CPU_DoubleU u;
1545
1546    u.d = uint32_to_float64(val, &env->vec_status);
1547
1548    return u.ll;
1549}
1550
1551uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
1552{
1553    CPU_DoubleU u;
1554
1555    u.d = uint64_to_float64(val, &env->vec_status);
1556
1557    return u.ll;
1558}
1559
1560uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
1561{
1562    CPU_DoubleU u;
1563
1564    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
1566    if (unlikely(float64_is_any_nan(u.d))) {
1567        return 0;
1568    }
1569
1570    return float64_to_int32(u.d, &env->vec_status);
1571}
1572
1573uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
1574{
1575    CPU_DoubleU u;
1576
1577    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
1579    if (unlikely(float64_is_any_nan(u.d))) {
1580        return 0;
1581    }
1582
1583    return float64_to_uint32(u.d, &env->vec_status);
1584}
1585
1586uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
1587{
1588    CPU_DoubleU u;
1589
1590    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
1592    if (unlikely(float64_is_any_nan(u.d))) {
1593        return 0;
1594    }
1595
1596    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
1597}
1598
1599uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
1600{
1601    CPU_DoubleU u;
1602
1603    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
1605    if (unlikely(float64_is_any_nan(u.d))) {
1606        return 0;
1607    }
1608
1609    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
1610}
1611
1612uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
1613{
1614    CPU_DoubleU u;
1615
1616    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
1618    if (unlikely(float64_is_any_nan(u.d))) {
1619        return 0;
1620    }
1621
1622    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
1623}
1624
1625uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
1626{
1627    CPU_DoubleU u;
1628
1629    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
1631    if (unlikely(float64_is_any_nan(u.d))) {
1632        return 0;
1633    }
1634
1635    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
1636}
1637
1638uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
1639{
1640    CPU_DoubleU u;
1641    float64 tmp;
1642
1643    u.d = int32_to_float64(val, &env->vec_status);
1644    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
1645    u.d = float64_div(u.d, tmp, &env->vec_status);
1646
1647    return u.ll;
1648}
1649
1650uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
1651{
1652    CPU_DoubleU u;
1653    float64 tmp;
1654
1655    u.d = uint32_to_float64(val, &env->vec_status);
1656    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
1657    u.d = float64_div(u.d, tmp, &env->vec_status);
1658
1659    return u.ll;
1660}
1661
1662uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
1663{
1664    CPU_DoubleU u;
1665    float64 tmp;
1666
1667    u.ll = val;
1668    /* NaNs are not treated the same way as in IEEE 754: the result is 0 */
1669    if (unlikely(float64_is_any_nan(u.d))) {
1670        return 0;
1671    }
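    /* Scale up by 2^32 before converting, so the result is a fixed-point fraction. */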
1672    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
1673    u.d = float64_mul(u.d, tmp, &env->vec_status);
1674
1675    return float64_to_int32(u.d, &env->vec_status);
1676}
1677
1678uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
1679{
1680    CPU_DoubleU u;
1681    float64 tmp;
1682
1683    u.ll = val;
1684    /* NaNs are not treated the same way as in IEEE 754: the result is 0 */
1685    if (unlikely(float64_is_any_nan(u.d))) {
1686        return 0;
1687    }
1688    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
1689    u.d = float64_mul(u.d, tmp, &env->vec_status);
1690
1691    return float64_to_uint32(u.d, &env->vec_status);
1692}
1693
1694uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
1695{
1696    CPU_DoubleU u1;
1697    CPU_FloatU u2;
1698
1699    u1.ll = val;
1700    u2.f = float64_to_float32(u1.d, &env->vec_status);
1701
1702    return u2.l;
1703}
1704
1705uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
1706{
1707    CPU_DoubleU u2;
1708    CPU_FloatU u1;
1709
1710    u1.l = val;
1711    u2.d = float32_to_float64(u1.f, &env->vec_status);
1712
1713    return u2.ll;
1714}
1715
1716/* Double-precision floating-point arithmetic */
1717uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
1718{
1719    CPU_DoubleU u1, u2;
1720
1721    u1.ll = op1;
1722    u2.ll = op2;
1723    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
1724    return u1.ll;
1725}
1726
1727uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
1728{
1729    CPU_DoubleU u1, u2;
1730
1731    u1.ll = op1;
1732    u2.ll = op2;
1733    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
1734    return u1.ll;
1735}
1736
1737uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
1738{
1739    CPU_DoubleU u1, u2;
1740
1741    u1.ll = op1;
1742    u2.ll = op2;
1743    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
1744    return u1.ll;
1745}
1746
1747uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
1748{
1749    CPU_DoubleU u1, u2;
1750
1751    u1.ll = op1;
1752    u2.ll = op2;
1753    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
1754    return u1.ll;
1755}
1756
1757/* Double precision floating point helpers */
1758uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1759{
1760    CPU_DoubleU u1, u2;
1761
1762    u1.ll = op1;
1763    u2.ll = op2;
1764    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
1765}
1766
1767uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1768{
1769    CPU_DoubleU u1, u2;
1770
1771    u1.ll = op1;
1772    u2.ll = op2;
1773    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
1774}
1775
1776uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
1777{
1778    CPU_DoubleU u1, u2;
1779
1780    u1.ll = op1;
1781    u2.ll = op2;
1782    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
1783}
1784
1785uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1786{
1787    /* XXX: TODO: test special values (NaN, infinities, ...) */
1788    return helper_efdtstlt(env, op1, op2);
1789}
1790
1791uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1792{
1793    /* XXX: TODO: test special values (NaN, infinities, ...) */
1794    return helper_efdtstgt(env, op1, op2);
1795}
1796
1797uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
1798{
1799    /* XXX: TODO: test special values (NaN, infinities, ...) */
1800    return helper_efdtsteq(env, op1, op2);
1801}
1802
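/*
 * Identity conversion, so that macros which expect a <type>_to_float64()
 * style conversion can also be instantiated with a float64 source.
 */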
1803#define float64_to_float64(x, env) x
1804
1805
1806/*
1807 * VSX_ADD_SUB - VSX floating point add/subtract
1808 *   name  - instruction mnemonic
1809 *   op    - operation (add or sub)
1810 *   nels  - number of elements (1, 2 or 4)
1811 *   tp    - type (float32 or float64)
1812 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1813 *   sfprf - set FPRF
 *   r2sp  - round the result to single precision
1814 */
1815#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp)                    \
1816void helper_##name(CPUPPCState *env, ppc_vsr_t *xt,                          \
1817                   ppc_vsr_t *xa, ppc_vsr_t *xb)                             \
1818{                                                                            \
1819    ppc_vsr_t t = *xt;                                                       \
1820    int i;                                                                   \
1821                                                                             \
1822    helper_reset_fpstatus(env);                                              \
1823                                                                             \
1824    for (i = 0; i < nels; i++) {                                             \
1825        float_status tstat = env->fp_status;                                 \
1826        set_float_exception_flags(0, &tstat);                                \
1827        t.fld = tp##_##op(xa->fld, xb->fld, &tstat);                         \
1828        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
1829                                                                             \
1830        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
1831            float_invalid_op_addsub(env, sfprf, GETPC(),                     \
1832                                    tp##_classify(xa->fld) |                 \
1833                                    tp##_classify(xb->fld));                 \
1834        }                                                                    \
1835                                                                             \
1836        if (r2sp) {                                                          \
1837            t.fld = helper_frsp(env, t.fld);                                 \
1838        }                                                                    \
1839                                                                             \
1840        if (sfprf) {                                                         \
1841            helper_compute_fprf_float64(env, t.fld);                         \
1842        }                                                                    \
1843    }                                                                        \
1844    *xt = t;                                                                 \
1845    do_float_check_status(env, GETPC());                                     \
1846}
1847
1848VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
1849VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
1850VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
1851VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
1852VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
1853VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
1854VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
1855VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
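
/*
 * For readers unfamiliar with the pattern above, this is roughly what
 * VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0) expands to (a
 * simplified, hand-written sketch rather than literal preprocessor output):
 *
 *   void helper_xsadddp(CPUPPCState *env, ppc_vsr_t *xt,
 *                       ppc_vsr_t *xa, ppc_vsr_t *xb)
 *   {
 *       ppc_vsr_t t = *xt;
 *       float_status tstat;
 *
 *       helper_reset_fpstatus(env);
 *       tstat = env->fp_status;
 *       set_float_exception_flags(0, &tstat);
 *       t.VsrD(0) = float64_add(xa->VsrD(0), xb->VsrD(0), &tstat);
 *       env->fp_status.float_exception_flags |= tstat.float_exception_flags;
 *       if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
 *           float_invalid_op_addsub(env, 1, GETPC(),
 *                                   float64_classify(xa->VsrD(0)) |
 *                                   float64_classify(xb->VsrD(0)));
 *       }
 *       helper_compute_fprf_float64(env, t.VsrD(0));
 *       *xt = t;
 *       do_float_check_status(env, GETPC());
 *   }
 */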
1856
1857void helper_xsaddqp(CPUPPCState *env, uint32_t opcode,
1858                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
1859{
1860    ppc_vsr_t t = *xt;
1861    float_status tstat;
1862
1863    helper_reset_fpstatus(env);
1864
1865    tstat = env->fp_status;
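    /*
     * The Rc bit of the opcode carries the "round to odd" (RO) flag for the
     * quad-precision forms (e.g. xsaddqpo): when it is set, the operation is
     * performed with round-to-odd rounding.
     */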
1866    if (unlikely(Rc(opcode) != 0)) {
1867        tstat.float_rounding_mode = float_round_to_odd;
1868    }
1869
1870    set_float_exception_flags(0, &tstat);
1871    t.f128 = float128_add(xa->f128, xb->f128, &tstat);
1872    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
1873
1874    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
1875        float_invalid_op_addsub(env, 1, GETPC(),
1876                                float128_classify(xa->f128) |
1877                                float128_classify(xb->f128));
1878    }
1879
1880    helper_compute_fprf_float128(env, t.f128);
1881
1882    *xt = t;
1883    do_float_check_status(env, GETPC());
1884}
1885
1886/*
1887 * VSX_MUL - VSX floating point multiply
1888 *   op    - instruction mnemonic
1889 *   nels  - number of elements (1, 2 or 4)
1890 *   tp    - type (float32 or float64)
1891 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1892 *   sfprf - set FPRF
 *   r2sp  - round the result to single precision
1893 */
1894#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp)                              \
1895void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                            \
1896                 ppc_vsr_t *xa, ppc_vsr_t *xb)                               \
1897{                                                                            \
1898    ppc_vsr_t t = *xt;                                                       \
1899    int i;                                                                   \
1900                                                                             \
1901    helper_reset_fpstatus(env);                                              \
1902                                                                             \
1903    for (i = 0; i < nels; i++) {                                             \
1904        float_status tstat = env->fp_status;                                 \
1905        set_float_exception_flags(0, &tstat);                                \
1906        t.fld = tp##_mul(xa->fld, xb->fld, &tstat);                          \
1907        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
1908                                                                             \
1909        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
1910            float_invalid_op_mul(env, sfprf, GETPC(),                        \
1911                                 tp##_classify(xa->fld) |                    \
1912                                 tp##_classify(xb->fld));                    \
1913        }                                                                    \
1914                                                                             \
1915        if (r2sp) {                                                          \
1916            t.fld = helper_frsp(env, t.fld);                                 \
1917        }                                                                    \
1918                                                                             \
1919        if (sfprf) {                                                         \
1920            helper_compute_fprf_float64(env, t.fld);                         \
1921        }                                                                    \
1922    }                                                                        \
1923                                                                             \
1924    *xt = t;                                                                 \
1925    do_float_check_status(env, GETPC());                                     \
1926}
1927
1928VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
1929VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
1930VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
1931VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
1932
1933void helper_xsmulqp(CPUPPCState *env, uint32_t opcode,
1934                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
1935{
1936    ppc_vsr_t t = *xt;
1937    float_status tstat;
1938
1939    helper_reset_fpstatus(env);
1940    tstat = env->fp_status;
1941    if (unlikely(Rc(opcode) != 0)) {
1942        tstat.float_rounding_mode = float_round_to_odd;
1943    }
1944
1945    set_float_exception_flags(0, &tstat);
1946    t.f128 = float128_mul(xa->f128, xb->f128, &tstat);
1947    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
1948
1949    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
1950        float_invalid_op_mul(env, 1, GETPC(),
1951                             float128_classify(xa->f128) |
1952                             float128_classify(xb->f128));
1953    }
1954    helper_compute_fprf_float128(env, t.f128);
1955
1956    *xt = t;
1957    do_float_check_status(env, GETPC());
1958}
1959
1960/*
1961 * VSX_DIV - VSX floating point divide
1962 *   op    - instruction mnemonic
1963 *   nels  - number of elements (1, 2 or 4)
1964 *   tp    - type (float32 or float64)
1965 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1966 *   sfprf - set FPRF
 *   r2sp  - round the result to single precision
1967 */
1968#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp)                               \
1969void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                             \
1970                 ppc_vsr_t *xa, ppc_vsr_t *xb)                                \
1971{                                                                             \
1972    ppc_vsr_t t = *xt;                                                        \
1973    int i;                                                                    \
1974                                                                              \
1975    helper_reset_fpstatus(env);                                               \
1976                                                                              \
1977    for (i = 0; i < nels; i++) {                                              \
1978        float_status tstat = env->fp_status;                                  \
1979        set_float_exception_flags(0, &tstat);                                 \
1980        t.fld = tp##_div(xa->fld, xb->fld, &tstat);                           \
1981        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
1982                                                                              \
1983        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
1984            float_invalid_op_div(env, sfprf, GETPC(),                         \
1985                                 tp##_classify(xa->fld) |                     \
1986                                 tp##_classify(xb->fld));                     \
1987        }                                                                     \
1988        if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {   \
1989            float_zero_divide_excp(env, GETPC());                             \
1990        }                                                                     \
1991                                                                              \
1992        if (r2sp) {                                                           \
1993            t.fld = helper_frsp(env, t.fld);                                  \
1994        }                                                                     \
1995                                                                              \
1996        if (sfprf) {                                                          \
1997            helper_compute_fprf_float64(env, t.fld);                          \
1998        }                                                                     \
1999    }                                                                         \
2000                                                                              \
2001    *xt = t;                                                                  \
2002    do_float_check_status(env, GETPC());                                      \
2003}
2004
2005VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
2006VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
2007VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
2008VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
2009
2010void helper_xsdivqp(CPUPPCState *env, uint32_t opcode,
2011                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
2012{
2013    ppc_vsr_t t = *xt;
2014    float_status tstat;
2015
2016    helper_reset_fpstatus(env);
2017    tstat = env->fp_status;
2018    if (unlikely(Rc(opcode) != 0)) {
2019        tstat.float_rounding_mode = float_round_to_odd;
2020    }
2021
2022    set_float_exception_flags(0, &tstat);
2023    t.f128 = float128_div(xa->f128, xb->f128, &tstat);
2024    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
2025
2026    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
2027        float_invalid_op_div(env, 1, GETPC(),
2028                             float128_classify(xa->f128) |
2029                             float128_classify(xb->f128));
2030    }
2031    if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {
2032        float_zero_divide_excp(env, GETPC());
2033    }
2034
2035    helper_compute_fprf_float128(env, t.f128);
2036    *xt = t;
2037    do_float_check_status(env, GETPC());
2038}
2039
2040/*
2041 * VSX_RE  - VSX floating point reciprocal estimate
2042 *   op    - instruction mnemonic
2043 *   nels  - number of elements (1, 2 or 4)
2044 *   tp    - type (float32 or float64)
2045 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2046 *   sfprf - set FPRF
 *   r2sp  - round the result to single precision
2047 */
2048#define VSX_RE(op, nels, tp, fld, sfprf, r2sp)                                \
2049void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)              \
2050{                                                                             \
2051    ppc_vsr_t t = *xt;                                                        \
2052    int i;                                                                    \
2053                                                                              \
2054    helper_reset_fpstatus(env);                                               \
2055                                                                              \
2056    for (i = 0; i < nels; i++) {                                              \
2057        if (unlikely(tp##_is_signaling_nan(xb->fld, &env->fp_status))) {      \
2058            float_invalid_op_vxsnan(env, GETPC());                            \
2059        }                                                                     \
2060        t.fld = tp##_div(tp##_one, xb->fld, &env->fp_status);                 \
2061                                                                              \
2062        if (r2sp) {                                                           \
2063            t.fld = helper_frsp(env, t.fld);                                  \
2064        }                                                                     \
2065                                                                              \
2066        if (sfprf) {                                                          \
2067            helper_compute_fprf_float64(env, t.fld);                          \
2068        }                                                                     \
2069    }                                                                         \
2070                                                                              \
2071    *xt = t;                                                                  \
2072    do_float_check_status(env, GETPC());                                      \
2073}
2074
2075VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
2076VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
2077VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
2078VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
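
/*
 * Note that these reciprocal-estimate helpers (and the VSX_RSQRTE ones
 * below) return the correctly rounded 1/x rather than a low-precision
 * estimate; the architecture only bounds the estimate's relative error,
 * so a correctly rounded result is always acceptable.
 */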
2079
2080/*
2081 * VSX_SQRT - VSX floating point square root
2082 *   op    - instruction mnemonic
2083 *   nels  - number of elements (1, 2 or 4)
2084 *   tp    - type (float32 or float64)
2085 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2086 *   sfprf - set FPRF
 *   r2sp  - round the result to single precision
2087 */
2088#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp)                             \
2089void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
2090{                                                                            \
2091    ppc_vsr_t t = *xt;                                                       \
2092    int i;                                                                   \
2093                                                                             \
2094    helper_reset_fpstatus(env);                                              \
2095                                                                             \
2096    for (i = 0; i < nels; i++) {                                             \
2097        float_status tstat = env->fp_status;                                 \
2098        set_float_exception_flags(0, &tstat);                                \
2099        t.fld = tp##_sqrt(xb->fld, &tstat);                                  \
2100        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
2101                                                                             \
2102        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
2103            if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) {            \
2104                float_invalid_op_vxsqrt(env, sfprf, GETPC());                \
2105            } else if (tp##_is_signaling_nan(xb->fld, &tstat)) {             \
2106                float_invalid_op_vxsnan(env, GETPC());                       \
2107            }                                                                \
2108        }                                                                    \
2109                                                                             \
2110        if (r2sp) {                                                          \
2111            t.fld = helper_frsp(env, t.fld);                                 \
2112        }                                                                    \
2113                                                                             \
2114        if (sfprf) {                                                         \
2115            helper_compute_fprf_float64(env, t.fld);                         \
2116        }                                                                    \
2117    }                                                                        \
2118                                                                             \
2119    *xt = t;                                                                 \
2120    do_float_check_status(env, GETPC());                                     \
2121}
2122
2123VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
2124VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
2125VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
2126VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
2127
2128/*
2129 * VSX_RSQRTE - VSX floating point reciprocal square root estimate
2130 *   op    - instruction mnemonic
2131 *   nels  - number of elements (1, 2 or 4)
2132 *   tp    - type (float32 or float64)
2133 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2134 *   sfprf - set FPRF
 *   r2sp  - round the result to single precision
2135 */
2136#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp)                           \
2137void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
2138{                                                                            \
2139    ppc_vsr_t t = *xt;                                                       \
2140    int i;                                                                   \
2141                                                                             \
2142    helper_reset_fpstatus(env);                                              \
2143                                                                             \
2144    for (i = 0; i < nels; i++) {                                             \
2145        float_status tstat = env->fp_status;                                 \
2146        set_float_exception_flags(0, &tstat);                                \
2147        t.fld = tp##_sqrt(xb->fld, &tstat);                                  \
2148        t.fld = tp##_div(tp##_one, t.fld, &tstat);                           \
2149        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
2150                                                                             \
2151        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
2152            if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) {            \
2153                float_invalid_op_vxsqrt(env, sfprf, GETPC());                \
2154            } else if (tp##_is_signaling_nan(xb->fld, &tstat)) {             \
2155                float_invalid_op_vxsnan(env, GETPC());                       \
2156            }                                                                \
2157        }                                                                    \
2158                                                                             \
2159        if (r2sp) {                                                          \
2160            t.fld = helper_frsp(env, t.fld);                                 \
2161        }                                                                    \
2162                                                                             \
2163        if (sfprf) {                                                         \
2164            helper_compute_fprf_float64(env, t.fld);                         \
2165        }                                                                    \
2166    }                                                                        \
2167                                                                             \
2168    *xt = t;                                                                 \
2169    do_float_check_status(env, GETPC());                                     \
2170}
2171
2172VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
2173VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
2174VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
2175VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
2176
2177/*
2178 * VSX_TDIV - VSX floating point test for divide
2179 *   op    - instruction mnemonic
2180 *   nels  - number of elements (1, 2 or 4)
2181 *   tp    - type (float32 or float64)
2182 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2183 *   emin  - minimum unbiased exponent
2184 *   emax  - maximum unbiased exponent
2185 *   nbits - number of fraction bits
2186 */
2187#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                  \
2188void helper_##op(CPUPPCState *env, uint32_t opcode,                     \
2189                 ppc_vsr_t *xa, ppc_vsr_t *xb)                          \
2190{                                                                       \
2191    int i;                                                              \
2192    int fe_flag = 0;                                                    \
2193    int fg_flag = 0;                                                    \
2194                                                                        \
2195    for (i = 0; i < nels; i++) {                                        \
2196        if (unlikely(tp##_is_infinity(xa->fld) ||                       \
2197                     tp##_is_infinity(xb->fld) ||                       \
2198                     tp##_is_zero(xb->fld))) {                          \
2199            fe_flag = 1;                                                \
2200            fg_flag = 1;                                                \
2201        } else {                                                        \
2202            int e_a = ppc_##tp##_get_unbiased_exp(xa->fld);             \
2203            int e_b = ppc_##tp##_get_unbiased_exp(xb->fld);             \
2204                                                                        \
2205            if (unlikely(tp##_is_any_nan(xa->fld) ||                    \
2206                         tp##_is_any_nan(xb->fld))) {                   \
2207                fe_flag = 1;                                            \
2208            } else if ((e_b <= emin) || (e_b >= (emax - 2))) {          \
2209                fe_flag = 1;                                            \
2210            } else if (!tp##_is_zero(xa->fld) &&                        \
2211                       (((e_a - e_b) >= emax) ||                        \
2212                        ((e_a - e_b) <= (emin + 1)) ||                  \
2213                        (e_a <= (emin + nbits)))) {                     \
2214                fe_flag = 1;                                            \
2215            }                                                           \
2216                                                                        \
2217            if (unlikely(tp##_is_zero_or_denormal(xb->fld))) {          \
2218                /*                                                      \
2219                 * XB is not zero because of the above check and so     \
2220                 * must be denormalized.                                \
2221                 */                                                     \
2222                fg_flag = 1;                                            \
2223            }                                                           \
2224        }                                                               \
2225    }                                                                   \
2226                                                                        \
2227    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
2228}
2229
2230VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
2231VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
2232VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
2233
2234/*
2235 * VSX_TSQRT - VSX floating point test for square root
2236 *   op    - instruction mnemonic
2237 *   nels  - number of elements (1, 2 or 4)
2238 *   tp    - type (float32 or float64)
2239 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2240 *   emin  - minimum unbiased exponent
2242 *   nbits - number of fraction bits
2243 */
2244#define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                       \
2245void helper_##op(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb)      \
2246{                                                                       \
2247    int i;                                                              \
2248    int fe_flag = 0;                                                    \
2249    int fg_flag = 0;                                                    \
2250                                                                        \
2251    for (i = 0; i < nels; i++) {                                        \
2252        if (unlikely(tp##_is_infinity(xb->fld) ||                       \
2253                     tp##_is_zero(xb->fld))) {                          \
2254            fe_flag = 1;                                                \
2255            fg_flag = 1;                                                \
2256        } else {                                                        \
2257            int e_b = ppc_##tp##_get_unbiased_exp(xb->fld);             \
2258                                                                        \
2259            if (unlikely(tp##_is_any_nan(xb->fld))) {                   \
2260                fe_flag = 1;                                            \
2261            } else if (unlikely(tp##_is_zero(xb->fld))) {               \
2262                fe_flag = 1;                                            \
2263            } else if (unlikely(tp##_is_neg(xb->fld))) {                \
2264                fe_flag = 1;                                            \
2265            } else if (!tp##_is_zero(xb->fld) &&                        \
2266                       (e_b <= (emin + nbits))) {                       \
2267                fe_flag = 1;                                            \
2268            }                                                           \
2269                                                                        \
2270            if (unlikely(tp##_is_zero_or_denormal(xb->fld))) {          \
2271                /*                                                      \
2272                 * XB is not zero because of the above check and        \
2273                 * therefore must be denormalized.                      \
2274                 */                                                     \
2275                fg_flag = 1;                                            \
2276            }                                                           \
2277        }                                                               \
2278    }                                                                   \
2279                                                                        \
2280    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
2281}
2282
2283VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
2284VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
2285VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
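
/*
 * For both the test-for-divide and test-for-square-root helpers, CR[BF] is
 * set to 0b1000 | (fg_flag << 2) | (fe_flag << 1): the high bit of the field
 * is always set and the low bit is always clear.
 */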
2286
2287/*
2288 * VSX_MADD - VSX floating point multiply/add variations
2289 *   op    - instruction mnemonic
2290 *   nels  - number of elements (1, 2 or 4)
2291 *   tp    - type (float32 or float64)
2292 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2293 *   maddflgs - flags for the float*muladd routine that control the
2294 *           various forms (madd, msub, nmadd, nmsub)
2295 *   sfprf - set FPRF
 *   r2sp  - round the result to single precision
2296 */
2297#define VSX_MADD(op, nels, tp, fld, maddflgs, sfprf, r2sp)                    \
2298void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                             \
2299                 ppc_vsr_t *xa, ppc_vsr_t *b, ppc_vsr_t *c)                   \
2300{                                                                             \
2301    ppc_vsr_t t = *xt;                                                        \
2302    int i;                                                                    \
2303                                                                              \
2304    helper_reset_fpstatus(env);                                               \
2305                                                                              \
2306    for (i = 0; i < nels; i++) {                                              \
2307        float_status tstat = env->fp_status;                                  \
2308        set_float_exception_flags(0, &tstat);                                 \
2309        if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
2310            /*                                                                \
2311             * Avoid double rounding errors by rounding the intermediate      \
2312             * result to odd.                                                 \
2313             */                                                               \
2314            set_float_rounding_mode(float_round_to_zero, &tstat);             \
2315            t.fld = tp##_muladd(xa->fld, b->fld, c->fld,                      \
2316                                maddflgs, &tstat);                            \
2317            t.fld |= (get_float_exception_flags(&tstat) &                     \
2318                      float_flag_inexact) != 0;                               \
2319        } else {                                                              \
2320            t.fld = tp##_muladd(xa->fld, b->fld, c->fld,                      \
2321                                maddflgs, &tstat);                            \
2322        }                                                                     \
2323        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
2324                                                                              \
2325        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
2326            tp##_maddsub_update_excp(env, xa->fld, b->fld,                    \
2327                                     c->fld, maddflgs, GETPC());              \
2328        }                                                                     \
2329                                                                              \
2330        if (r2sp) {                                                           \
2331            t.fld = helper_frsp(env, t.fld);                                  \
2332        }                                                                     \
2333                                                                              \
2334        if (sfprf) {                                                          \
2335            helper_compute_fprf_float64(env, t.fld);                          \
2336        }                                                                     \
2337    }                                                                         \
2338    *xt = t;                                                                  \
2339    do_float_check_status(env, GETPC());                                      \
2340}
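
/*
 * A note on the r2sp double-rounding workaround above: round-to-zero plus
 * ORing the inexact flag into the least significant bit implements
 * "round to odd".  A round-to-odd intermediate value can only land exactly
 * on a single-precision representable value or halfway point if the exact
 * result is already there, so the final rounding to single precision in
 * helper_frsp() matches what a single direct rounding would have produced.
 */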
2341
2342VSX_MADD(xsmadddp, 1, float64, VsrD(0), MADD_FLGS, 1, 0)
2343VSX_MADD(xsmsubdp, 1, float64, VsrD(0), MSUB_FLGS, 1, 0)
2344VSX_MADD(xsnmadddp, 1, float64, VsrD(0), NMADD_FLGS, 1, 0)
2345VSX_MADD(xsnmsubdp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 0)
2346VSX_MADD(xsmaddsp, 1, float64, VsrD(0), MADD_FLGS, 1, 1)
2347VSX_MADD(xsmsubsp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1)
2348VSX_MADD(xsnmaddsp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1)
2349VSX_MADD(xsnmsubsp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1)
2350
2351VSX_MADD(xvmadddp, 2, float64, VsrD(i), MADD_FLGS, 0, 0)
2352VSX_MADD(xvmsubdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0)
2353VSX_MADD(xvnmadddp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0)
2354VSX_MADD(xvnmsubdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0)
2355
2356VSX_MADD(xvmaddsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0)
2357VSX_MADD(xvmsubsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0)
2358VSX_MADD(xvnmaddsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0)
2359VSX_MADD(xvnmsubsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0)
2360
2361/*
2362 * VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
2363 *   op    - instruction mnemonic
2364 *   cmp   - comparison operation
2365 *   exp   - expected result of comparison
2366 *   svxvc - set VXVC bit
2367 */
2368#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc)                                \
2369void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                             \
2370                 ppc_vsr_t *xa, ppc_vsr_t *xb)                                \
2371{                                                                             \
2372    ppc_vsr_t t = *xt;                                                        \
2373    bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false;            \
2374                                                                              \
2375    if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||             \
2376        float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {             \
2377        vxsnan_flag = true;                                                   \
2378        if (fpscr_ve == 0 && svxvc) {                                         \
2379            vxvc_flag = true;                                                 \
2380        }                                                                     \
2381    } else if (svxvc) {                                                       \
2382        vxvc_flag = float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) ||     \
2383            float64_is_quiet_nan(xb->VsrD(0), &env->fp_status);               \
2384    }                                                                         \
2385    if (vxsnan_flag) {                                                        \
2386        float_invalid_op_vxsnan(env, GETPC());                                \
2387    }                                                                         \
2388    if (vxvc_flag) {                                                          \
2389        float_invalid_op_vxvc(env, 0, GETPC());                               \
2390    }                                                                         \
2391    vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag);                        \
2392                                                                              \
2393    if (!vex_flag) {                                                          \
2394        if (float64_##cmp(xb->VsrD(0), xa->VsrD(0),                           \
2395                          &env->fp_status) == exp) {                          \
2396            t.VsrD(0) = -1;                                                   \
2397            t.VsrD(1) = 0;                                                    \
2398        } else {                                                              \
2399            t.VsrD(0) = 0;                                                    \
2400            t.VsrD(1) = 0;                                                    \
2401        }                                                                     \
2402    }                                                                         \
2403    *xt = t;                                                                  \
2404    do_float_check_status(env, GETPC());                                      \
2405}
2406
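/*
 * Note the reversed operand order in the comparison above: the result is
 * true when float64_<cmp>(xb, xa) == exp, which is why xscmpgedp passes
 * cmp=le and xscmpgtdp passes cmp=lt.
 */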
2407VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
2408VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
2409VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
2410VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)
2411
2412void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode,
2413                       ppc_vsr_t *xa, ppc_vsr_t *xb)
2414{
2415    int64_t exp_a, exp_b;
2416    uint32_t cc;
2417
2418    exp_a = extract64(xa->VsrD(0), 52, 11);
2419    exp_b = extract64(xb->VsrD(0), 52, 11);
2420
2421    if (unlikely(float64_is_any_nan(xa->VsrD(0)) ||
2422                 float64_is_any_nan(xb->VsrD(0)))) {
2423        cc = CRF_SO;
2424    } else {
2425        if (exp_a < exp_b) {
2426            cc = CRF_LT;
2427        } else if (exp_a > exp_b) {
2428            cc = CRF_GT;
2429        } else {
2430            cc = CRF_EQ;
2431        }
2432    }
2433
2434    env->fpscr &= ~FP_FPCC;
2435    env->fpscr |= cc << FPSCR_FPCC;
2436    env->crf[BF(opcode)] = cc;
2437
2438    do_float_check_status(env, GETPC());
2439}
2440
2441void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode,
2442                       ppc_vsr_t *xa, ppc_vsr_t *xb)
2443{
2444    int64_t exp_a, exp_b;
2445    uint32_t cc;
2446
2447    exp_a = extract64(xa->VsrD(0), 48, 15);
2448    exp_b = extract64(xb->VsrD(0), 48, 15);
2449
2450    if (unlikely(float128_is_any_nan(xa->f128) ||
2451                 float128_is_any_nan(xb->f128))) {
2452        cc = CRF_SO;
2453    } else {
2454        if (exp_a < exp_b) {
2455            cc = CRF_LT;
2456        } else if (exp_a > exp_b) {
2457            cc = CRF_GT;
2458        } else {
2459            cc = CRF_EQ;
2460        }
2461    }
2462
2463    env->fpscr &= ~FP_FPCC;
2464    env->fpscr |= cc << FPSCR_FPCC;
2465    env->crf[BF(opcode)] = cc;
2466
2467    do_float_check_status(env, GETPC());
2468}
2469
2470#define VSX_SCALAR_CMP(op, ordered)                                      \
2471void helper_##op(CPUPPCState *env, uint32_t opcode,                      \
2472                 ppc_vsr_t *xa, ppc_vsr_t *xb)                           \
2473{                                                                        \
2474    uint32_t cc = 0;                                                     \
2475    bool vxsnan_flag = false, vxvc_flag = false;                         \
2476                                                                         \
2477    helper_reset_fpstatus(env);                                          \
2478                                                                         \
2479    if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||        \
2480        float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {        \
2481        vxsnan_flag = true;                                              \
2482        cc = CRF_SO;                                                     \
2483        if (fpscr_ve == 0 && ordered) {                                  \
2484            vxvc_flag = true;                                            \
2485        }                                                                \
2486    } else if (float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) ||     \
2487               float64_is_quiet_nan(xb->VsrD(0), &env->fp_status)) {     \
2488        cc = CRF_SO;                                                     \
2489        if (ordered) {                                                   \
2490            vxvc_flag = true;                                            \
2491        }                                                                \
2492    }                                                                    \
2493    if (vxsnan_flag) {                                                   \
2494        float_invalid_op_vxsnan(env, GETPC());                           \
2495    }                                                                    \
2496    if (vxvc_flag) {                                                     \
2497        float_invalid_op_vxvc(env, 0, GETPC());                          \
2498    }                                                                    \
2499                                                                         \
2500    if (float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) {         \
2501        cc |= CRF_LT;                                                    \
2502    } else if (!float64_le(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) { \
2503        cc |= CRF_GT;                                                    \
2504    } else {                                                             \
2505        cc |= CRF_EQ;                                                    \
2506    }                                                                    \
2507                                                                         \
2508    env->fpscr &= ~FP_FPCC;                                              \
2509    env->fpscr |= cc << FPSCR_FPCC;                                      \
2510    env->crf[BF(opcode)] = cc;                                           \
2511                                                                         \
2512    do_float_check_status(env, GETPC());                                 \
2513}
2514
2515VSX_SCALAR_CMP(xscmpodp, 1)
2516VSX_SCALAR_CMP(xscmpudp, 0)
2517
2518#define VSX_SCALAR_CMPQ(op, ordered)                                    \
2519void helper_##op(CPUPPCState *env, uint32_t opcode,                     \
2520                 ppc_vsr_t *xa, ppc_vsr_t *xb)                          \
2521{                                                                       \
2522    uint32_t cc = 0;                                                    \
2523    bool vxsnan_flag = false, vxvc_flag = false;                        \
2524                                                                        \
2525    helper_reset_fpstatus(env);                                         \
2526                                                                        \
2527    if (float128_is_signaling_nan(xa->f128, &env->fp_status) ||         \
2528        float128_is_signaling_nan(xb->f128, &env->fp_status)) {         \
2529        vxsnan_flag = true;                                             \
2530        cc = CRF_SO;                                                    \
2531        if (fpscr_ve == 0 && ordered) {                                 \
2532            vxvc_flag = true;                                           \
2533        }                                                               \
2534    } else if (float128_is_quiet_nan(xa->f128, &env->fp_status) ||      \
2535               float128_is_quiet_nan(xb->f128, &env->fp_status)) {      \
2536        cc = CRF_SO;                                                    \
2537        if (ordered) {                                                  \
2538            vxvc_flag = true;                                           \
2539        }                                                               \
2540    }                                                                   \
2541    if (vxsnan_flag) {                                                  \
2542        float_invalid_op_vxsnan(env, GETPC());                          \
2543    }                                                                   \
2544    if (vxvc_flag) {                                                    \
2545        float_invalid_op_vxvc(env, 0, GETPC());                         \
2546    }                                                                   \
2547                                                                        \
2548    if (float128_lt(xa->f128, xb->f128, &env->fp_status)) {             \
2549        cc |= CRF_LT;                                                   \
2550    } else if (!float128_le(xa->f128, xb->f128, &env->fp_status)) {     \
2551        cc |= CRF_GT;                                                   \
2552    } else {                                                            \
2553        cc |= CRF_EQ;                                                   \
2554    }                                                                   \
2555                                                                        \
2556    env->fpscr &= ~FP_FPCC;                                             \
2557    env->fpscr |= cc << FPSCR_FPCC;                                     \
2558    env->crf[BF(opcode)] = cc;                                          \
2559                                                                        \
2560    do_float_check_status(env, GETPC());                                \
2561}
2562
2563VSX_SCALAR_CMPQ(xscmpoqp, 1)
2564VSX_SCALAR_CMPQ(xscmpuqp, 0)
2565
2566/*
2567 * VSX_MAX_MIN - VSX floating point maximum/minimum
2568 *   name  - instruction mnemonic
2569 *   op    - operation (max or min)
2570 *   nels  - number of elements (1, 2 or 4)
2571 *   tp    - type (float32 or float64)
2572 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2573 */
2574#define VSX_MAX_MIN(name, op, nels, tp, fld)                                  \
2575void helper_##name(CPUPPCState *env, ppc_vsr_t *xt,                           \
2576                   ppc_vsr_t *xa, ppc_vsr_t *xb)                              \
2577{                                                                             \
2578    ppc_vsr_t t = *xt;                                                        \
2579    int i;                                                                    \
2580                                                                              \
2581    for (i = 0; i < nels; i++) {                                              \
2582        t.fld = tp##_##op(xa->fld, xb->fld, &env->fp_status);                 \
2583        if (unlikely(tp##_is_signaling_nan(xa->fld, &env->fp_status) ||       \
2584                     tp##_is_signaling_nan(xb->fld, &env->fp_status))) {      \
2585            float_invalid_op_vxsnan(env, GETPC());                            \
2586        }                                                                     \
2587    }                                                                         \
2588                                                                              \
2589    *xt = t;                                                                  \
2590    do_float_check_status(env, GETPC());                                      \
2591}
2592
2593VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
2594VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
2595VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
2596VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
2597VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
2598VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
2599
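/*
 * VSX_MAX_MINC - VSX scalar maximum/minimum (the "type-C" xsmaxcdp/xsmincdp
 *                forms)
 *   name  - instruction mnemonic
 *   max   - 1 for maximum, 0 for minimum
 * If either operand is a NaN, the result is taken from xb.
 */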
2600#define VSX_MAX_MINC(name, max)                                               \
2601void helper_##name(CPUPPCState *env, uint32_t opcode,                         \
2602                   ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)               \
2603{                                                                             \
2604    ppc_vsr_t t = *xt;                                                        \
2605    bool vxsnan_flag = false, vex_flag = false;                               \
2606                                                                              \
2607    if (unlikely(float64_is_any_nan(xa->VsrD(0)) ||                           \
2608                 float64_is_any_nan(xb->VsrD(0)))) {                          \
2609        if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||         \
2610            float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {         \
2611            vxsnan_flag = true;                                               \
2612        }                                                                     \
2613        t.VsrD(0) = xb->VsrD(0);                                              \
2614    } else if ((max &&                                                        \
2615               !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) ||     \
2616               (!max &&                                                       \
2617               float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) {      \
2618        t.VsrD(0) = xa->VsrD(0);                                              \
2619    } else {                                                                  \
2620        t.VsrD(0) = xb->VsrD(0);                                              \
2621    }                                                                         \
2622                                                                              \
2623    vex_flag = fpscr_ve & vxsnan_flag;                                        \
2624    if (vxsnan_flag) {                                                        \
2625        float_invalid_op_vxsnan(env, GETPC());                                \
2626    }                                                                         \
2627    if (!vex_flag) {                                                          \
2628        *xt = t;                                                              \
2629    }                                                                         \
2630}
2631
2632VSX_MAX_MINC(xsmaxcdp, 1);
2633VSX_MAX_MINC(xsmincdp, 0);
2634
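/*
 * VSX_MAX_MINJ - VSX scalar maximum/minimum (the "type-J" xsmaxjdp/xsminjdp
 *                forms)
 *   name  - instruction mnemonic
 *   max   - 1 for maximum, 0 for minimum
 * A NaN in xa is returned unchanged; otherwise a NaN in xb is returned
 * unchanged.  When both operands are zeros, the maximum is +0 unless both
 * are negative, and the minimum is -0 if either is negative.
 */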
2635#define VSX_MAX_MINJ(name, max)                                               \
2636void helper_##name(CPUPPCState *env, uint32_t opcode,                         \
2637                   ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)               \
2638{                                                                             \
2639    ppc_vsr_t t = *xt;                                                        \
2640    bool vxsnan_flag = false, vex_flag = false;                               \
2641                                                                              \
2642    if (unlikely(float64_is_any_nan(xa->VsrD(0)))) {                          \
2643        if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status)) {         \
2644            vxsnan_flag = true;                                               \
2645        }                                                                     \
2646        t.VsrD(0) = xa->VsrD(0);                                              \
2647    } else if (unlikely(float64_is_any_nan(xb->VsrD(0)))) {                   \
2648        if (float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {         \
2649            vxsnan_flag = true;                                               \
2650        }                                                                     \
2651        t.VsrD(0) = xb->VsrD(0);                                              \
2652    } else if (float64_is_zero(xa->VsrD(0)) &&                                \
2653               float64_is_zero(xb->VsrD(0))) {                                \
2654        if (max) {                                                            \
2655            if (!float64_is_neg(xa->VsrD(0)) ||                               \
2656                !float64_is_neg(xb->VsrD(0))) {                               \
2657                t.VsrD(0) = 0ULL;                                             \
2658            } else {                                                          \
2659                t.VsrD(0) = 0x8000000000000000ULL;                            \
2660            }                                                                 \
2661        } else {                                                              \
2662            if (float64_is_neg(xa->VsrD(0)) ||                                \
2663                float64_is_neg(xb->VsrD(0))) {                                \
2664                t.VsrD(0) = 0x8000000000000000ULL;                            \
2665            } else {                                                          \
2666                t.VsrD(0) = 0ULL;                                             \
2667            }                                                                 \
2668        }                                                                     \
2669    } else if ((max &&                                                        \
2670               !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) ||     \
2671               (!max &&                                                       \
2672               float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) {      \
2673        t.VsrD(0) = xa->VsrD(0);                                              \
2674    } else {                                                                  \
2675        t.VsrD(0) = xb->VsrD(0);                                              \
2676    }                                                                         \
2677                                                                              \
2678    vex_flag = fpscr_ve & vxsnan_flag;                                        \
2679    if (vxsnan_flag) {                                                        \
2680        float_invalid_op_vxsnan(env, GETPC());                                \
2681    }                                                                         \
2682    if (!vex_flag) {                                                          \
2683        *xt = t;                                                              \
2684    }                                                                         \
2685}                                                                             \
2686
2687VSX_MAX_MINJ(xsmaxjdp, 1);
2688VSX_MAX_MINJ(xsminjdp, 0);
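    /*
     * Note on the type-J forms above: unlike the type-C forms, xsmaxjdp
     * and xsminjdp propagate a NaN operand (xa is checked first) and
     * distinguish signed zeros: per the zero/zero branch of the macro,
     * max(+0.0, -0.0) yields +0.0 and min(+0.0, -0.0) yields -0.0.
     */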
2689
2690/*
2691 * VSX_CMP - VSX floating point compare
2692 *   op    - instruction mnemonic
2693 *   nels  - number of elements (1, 2 or 4)
2694 *   tp    - type (float32 or float64)
2695 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2696 *   cmp   - comparison operation
2697 *   svxvc - set VXVC bit
2698 *   exp   - expected result of comparison
2699 */
2700#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp)                       \
2701uint32_t helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                     \
2702                     ppc_vsr_t *xa, ppc_vsr_t *xb)                        \
2703{                                                                         \
2704    ppc_vsr_t t = *xt;                                                    \
2705    uint32_t crf6 = 0;                                                    \
2706    int i;                                                                \
2707    int all_true = 1;                                                     \
2708    int all_false = 1;                                                    \
2709                                                                          \
2710    for (i = 0; i < nels; i++) {                                          \
2711        if (unlikely(tp##_is_any_nan(xa->fld) ||                          \
2712                     tp##_is_any_nan(xb->fld))) {                         \
2713            if (tp##_is_signaling_nan(xa->fld, &env->fp_status) ||        \
2714                tp##_is_signaling_nan(xb->fld, &env->fp_status)) {        \
2715                float_invalid_op_vxsnan(env, GETPC());                    \
2716            }                                                             \
2717            if (svxvc) {                                                  \
2718                float_invalid_op_vxvc(env, 0, GETPC());                   \
2719            }                                                             \
2720            t.fld = 0;                                                    \
2721            all_true = 0;                                                 \
2722        } else {                                                          \
2723            if (tp##_##cmp(xb->fld, xa->fld, &env->fp_status) == exp) {   \
2724                t.fld = -1;                                               \
2725                all_false = 0;                                            \
2726            } else {                                                      \
2727                t.fld = 0;                                                \
2728                all_true = 0;                                             \
2729            }                                                             \
2730        }                                                                 \
2731    }                                                                     \
2732                                                                          \
2733    *xt = t;                                                              \
2734    crf6 = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);                  \
2735    return crf6;                                                          \
2736}
2737
2738VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
2739VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
2740VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
2741VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
2742VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
2743VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
2744VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
2745VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
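    /*
     * In the VSX_CMP instantiations above, "ge" and "gt" are built from
     * the softfloat le/lt predicates with the operands swapped
     * (cmp(xb, xa)), and "ne" reuses eq with an expected result of 0.
     * The returned CR6 value has 0x8 set when every element compared
     * true and 0x2 set when every element compared false; a NaN element
     * counts as false, and the compares that pass svxvc (ge/gt) also
     * raise VXVC in that case.
     */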
2746
2747/*
2748 * VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
2749 *   op    - instruction mnemonic
2750 *   nels  - number of elements (1, 2 or 4)
2751 *   stp   - source type (float32 or float64)
2752 *   ttp   - target type (float32 or float64)
2753 *   sfld  - source vsr_t field
2754 *   tfld  - target vsr_t field (VsrD(*) or VsrW(*))
2755 *   sfprf - set FPRF
2756 */
2757#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf)    \
2758void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)   \
2759{                                                                  \
2760    ppc_vsr_t t = *xt;                                             \
2761    int i;                                                         \
2762                                                                   \
2763    for (i = 0; i < nels; i++) {                                   \
2764        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);        \
2765        if (unlikely(stp##_is_signaling_nan(xb->sfld,              \
2766                                            &env->fp_status))) {   \
2767            float_invalid_op_vxsnan(env, GETPC());                 \
2768            t.tfld = ttp##_snan_to_qnan(t.tfld);                   \
2769        }                                                          \
2770        if (sfprf) {                                               \
2771            helper_compute_fprf_##ttp(env, t.tfld);                \
2772        }                                                          \
2773    }                                                              \
2774                                                                   \
2775    *xt = t;                                                       \
2776    do_float_check_status(env, GETPC());                           \
2777}
2778
2779VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
2780VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
2781VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2 * i), 0)
2782VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)
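    /*
     * For the vector forms above, xvcvdpsp stores each converted single
     * in word 2 * i, i.e. the first word of the corresponding doubleword;
     * since t starts as a copy of *xt, the other words keep their old
     * contents (the ISA leaves them undefined).  xvcvspdp reads its
     * sources from the same word positions.
     */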
2783
2784/*
2785 * VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
2786 *   op    - instruction mnemonic
2787 *   nels  - number of elements (1, 2 or 4)
2788 *   stp   - source type (float32 or float64)
2789 *   ttp   - target type (float32, float64 or float128)
2790 *   sfld  - source vsr_t field
2791 *   tfld  - target vsr_t field (f128)
2792 *   sfprf - set FPRF
2793 */
2794#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf)    \
2795void helper_##op(CPUPPCState *env, uint32_t opcode,                       \
2796                 ppc_vsr_t *xt, ppc_vsr_t *xb)                            \
2797{                                                                       \
2798    ppc_vsr_t t = *xt;                                                  \
2799    int i;                                                              \
2800                                                                        \
2801    for (i = 0; i < nels; i++) {                                        \
2802        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);             \
2803        if (unlikely(stp##_is_signaling_nan(xb->sfld,                   \
2804                                            &env->fp_status))) {        \
2805            float_invalid_op_vxsnan(env, GETPC());                      \
2806            t.tfld = ttp##_snan_to_qnan(t.tfld);                        \
2807        }                                                               \
2808        if (sfprf) {                                                    \
2809            helper_compute_fprf_##ttp(env, t.tfld);                     \
2810        }                                                               \
2811    }                                                                   \
2812                                                                        \
2813    *xt = t;                                                            \
2814    do_float_check_status(env, GETPC());                                \
2815}
2816
2817VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
2818
2819/*
2820 * VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
2821 *                       involving one half precision value
2822 *   op    - instruction mnemonic
2823 *   nels  - number of elements (1, 2 or 4)
2824 *   stp   - source type
2825 *   ttp   - target type
2826 *   sfld  - source vsr_t field
2827 *   tfld  - target vsr_t field
2828 *   sfprf - set FPRF
2829 */
2830#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf) \
2831void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)   \
2832{                                                                  \
2833    ppc_vsr_t t = { };                                             \
2834    int i;                                                         \
2835                                                                   \
2836    for (i = 0; i < nels; i++) {                                   \
2837        t.tfld = stp##_to_##ttp(xb->sfld, 1, &env->fp_status);     \
2838        if (unlikely(stp##_is_signaling_nan(xb->sfld,              \
2839                                            &env->fp_status))) {   \
2840            float_invalid_op_vxsnan(env, GETPC());                 \
2841            t.tfld = ttp##_snan_to_qnan(t.tfld);                   \
2842        }                                                          \
2843        if (sfprf) {                                               \
2844            helper_compute_fprf_##ttp(env, t.tfld);                \
2845        }                                                          \
2846    }                                                              \
2847                                                                   \
2848    *xt = t;                                                       \
2849    do_float_check_status(env, GETPC());                           \
2850}
2851
2852VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
2853VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
2854VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0)
2855VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
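    /*
     * The extra '1' argument passed to the conversion routines above is
     * this softfloat version's "ieee" flag, selecting IEEE 754 half
     * precision (with infinities and NaNs) rather than the alternative
     * half-precision format.  The scalar forms use VsrH(3), the least
     * significant halfword of doubleword 0; the vector forms use the odd
     * halfword of each word.
     */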
2856
2857/*
2858 * xscvqpdp doesn't use VSX_CVT_FP_TO_FP() because this helper also
2859 * implements xscvqpdpo, the round-to-odd form selected via Rc(opcode).
2860 */
2861void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode,
2862                     ppc_vsr_t *xt, ppc_vsr_t *xb)
2863{
2864    ppc_vsr_t t = { };
2865    float_status tstat;
2866
2867    tstat = env->fp_status;
2868    if (unlikely(Rc(opcode) != 0)) {
2869        tstat.float_rounding_mode = float_round_to_odd;
2870    }
2871
2872    t.VsrD(0) = float128_to_float64(xb->f128, &tstat);
2873    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
2874    if (unlikely(float128_is_signaling_nan(xb->f128, &tstat))) {
2875        float_invalid_op_vxsnan(env, GETPC());
2876        t.VsrD(0) = float64_snan_to_qnan(t.VsrD(0));
2877    }
2878    helper_compute_fprf_float64(env, t.VsrD(0));
2879
2880    *xt = t;
2881    do_float_check_status(env, GETPC());
2882}
2883
2884uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
2885{
2886    uint64_t result, sign, exp, frac;
2887
2888    float_status tstat = env->fp_status;
2889    set_float_exception_flags(0, &tstat);
2890
2891    sign = extract64(xb, 63,  1);
2892    exp  = extract64(xb, 52, 11);
2893    frac = extract64(xb,  0, 52) | 0x10000000000000ULL;
2894
2895    if (unlikely(exp == 0 && extract64(frac, 0, 52) != 0)) {
2896        /* DP denormal operand.  */
2897        /* Exponent override to DP min exp.  */
2898        exp = 1;
2899        /* Implicit bit override to 0.  */
2900        frac = deposit64(frac, 52, 1, 0);
2901    }
2902
2903    if (unlikely(exp < 897 && frac != 0)) {
2904        /* SP tiny operand.  */
2905        if (897 - exp > 63) {
2906            frac = 0;
2907        } else {
2908            /* Denormalize until exp = SP min exp.  */
2909            frac >>= (897 - exp);
2910        }
2911        /* Exponent override to SP min exp - 1.  */
2912        exp = 896;
2913    }
2914
2915    result = sign << 31;
2916    result |= extract64(exp, 10, 1) << 30;
2917    result |= extract64(exp, 0, 7) << 23;
2918    result |= extract64(frac, 29, 23);
2919
2920    /* hardware replicates result to both words of the doubleword result.  */
2921    return (result << 32) | result;
2922}
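    /*
     * A note on the constants above: the single-precision exponent is the
     * double-precision one rebiased by 127 - 1023 = -896.  The smallest
     * SP normal exponent (biased 1) therefore corresponds to a biased DP
     * exponent of 897, which is why 897 is the "tiny" threshold and 896
     * (SP min exp - 1) the exponent forced for denormalized results.  For
     * in-range values, bit 10 plus the low 7 bits of the DP exponent
     * reproduce exp - 896 directly; e.g. 1.0 has DP exponent 0x3ff, giving
     * bit 10 = 0 and low bits 0x7f = 127, the SP bias.
     */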
2923
2924uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
2925{
2926    float_status tstat = env->fp_status;
2927    set_float_exception_flags(0, &tstat);
2928
2929    return float32_to_float64(xb >> 32, &tstat);
2930}
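    /*
     * Both non-signalling ("n") conversions above leave the FPSCR alone:
     * xscvdpspn works purely by bit manipulation, and xscvspdpn converts
     * through a local float_status whose exception flags are discarded.
     */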
2931
2932/*
2933 * VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
2934 *   op    - instruction mnemonic
2935 *   nels  - number of elements (1, 2 or 4)
2936 *   stp   - source type (float32 or float64)
2937 *   ttp   - target type (int32, uint32, int64 or uint64)
2938 *   sfld  - source vsr_t field
2939 *   tfld  - target vsr_t field
2940 *   rnan  - resulting NaN
2941 */
2942#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan)              \
2943void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
2944{                                                                            \
2945    int all_flags = env->fp_status.float_exception_flags, flags;             \
2946    ppc_vsr_t t = *xt;                                                       \
2947    int i;                                                                   \
2948                                                                             \
2949    for (i = 0; i < nels; i++) {                                             \
2950        env->fp_status.float_exception_flags = 0;                            \
2951        t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status);  \
2952        flags = env->fp_status.float_exception_flags;                        \
2953        if (unlikely(flags & float_flag_invalid)) {                          \
2954            float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld));    \
2955            t.tfld = rnan;                                                   \
2956        }                                                                    \
2957        all_flags |= flags;                                                  \
2958    }                                                                        \
2959                                                                             \
2960    *xt = t;                                                                 \
2961    env->fp_status.float_exception_flags = all_flags;                        \
2962    do_float_check_status(env, GETPC());                                     \
2963}
2964
2965VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
2966                  0x8000000000000000ULL)
2967VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
2968                  0x80000000U)
2969VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
2970VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
2971VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
2972                  0x8000000000000000ULL)
2973VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2 * i), \
2974                  0x80000000U)
2975VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
2976VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2 * i), 0U)
2977VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i), \
2978                  0x8000000000000000ULL)
2979VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
2980VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), 0ULL)
2981VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
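    /*
     * The rnan argument above is the value substituted whenever the
     * round-to-zero conversion raises the invalid exception: the most
     * negative representable integer for the signed conversions and zero
     * for the unsigned ones.  The 32-bit scalar results are written to
     * VsrW(1), the low word of doubleword 0.
     */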
2982
2983/*
2984 * VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
2985 *   op    - instruction mnemonic
2986 *   stp   - source type (float32 or float64)
2987 *   ttp   - target type (int32, uint32, int64 or uint64)
2988 *   sfld  - source vsr_t field
2989 *   tfld  - target vsr_t field
2990 *   rnan  - resulting NaN
2991 */
2992#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan)             \
2993void helper_##op(CPUPPCState *env, uint32_t opcode,                          \
2994                 ppc_vsr_t *xt, ppc_vsr_t *xb)                               \
2995{                                                                            \
2996    ppc_vsr_t t = { };                                                       \
2997                                                                             \
2998    t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status);      \
2999    if (env->fp_status.float_exception_flags & float_flag_invalid) {         \
3000        float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld));        \
3001        t.tfld = rnan;                                                       \
3002    }                                                                        \
3003                                                                             \
3004    *xt = t;                                                                 \
3005    do_float_check_status(env, GETPC());                                     \
3006}
3007
3008VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0),          \
3009                  0x8000000000000000ULL)
3010
3011VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0),          \
3012                  0xffffffff80000000ULL)
3013VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
3014VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
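    /*
     * The quad-precision conversions above place their result in
     * doubleword 0 of the target, which is why xscvqpswz's rnan is the
     * 32-bit minimum sign-extended to 64 bits.
     */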
3015
3016/*
3017 * VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
3018 *   op    - instruction mnemonic
3019 *   nels  - number of elements (1, 2 or 4)
3020 *   stp   - source type (int32, uint32, int64 or uint64)
3021 *   ttp   - target type (float32 or float64)
3022 *   sfld  - source vsr_t field
3023 *   tfld  - target vsr_t field
3024 *   sfprf - set FPRF
3025 *   r2sp  - round the intermediate result to single precision
3026 */
3027#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp)  \
3028void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)        \
3029{                                                                       \
3030    ppc_vsr_t t = *xt;                                                  \
3031    int i;                                                              \
3032                                                                        \
3033    for (i = 0; i < nels; i++) {                                        \
3034        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);             \
3035        if (r2sp) {                                                     \
3036            t.tfld = helper_frsp(env, t.tfld);                          \
3037        }                                                               \
3038        if (sfprf) {                                                    \
3039            helper_compute_fprf_float64(env, t.tfld);                   \
3040        }                                                               \
3041    }                                                                   \
3042                                                                        \
3043    *xt = t;                                                            \
3044    do_float_check_status(env, GETPC());                                \
3045}
3046
3047VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
3048VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
3049VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
3050VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
3051VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
3052VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
3053VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2 * i), VsrD(i), 0, 0)
3054VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2 * i), VsrD(i), 0, 0)
3055VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2 * i), 0, 0)
3056VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2 * i), 0, 0)
3057VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
3058VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
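    /*
     * r2sp above selects the single-precision scalar forms (xscvsxdsp and
     * xscvuxdsp): the integer is converted to double precision and then
     * rounded to single precision with helper_frsp(), leaving the result
     * in the double-precision format used for SP values held in VSRs.
     */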
3059
3060/*
3061 * VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
3062 *   op    - instruction mnemonic
3063 *   stp   - source type (int32, uint32, int64 or uint64)
3064 *   ttp   - target type (float32 or float64)
3065 *   sfld  - source vsr_t field
3066 *   tfld  - target vsr_t field
3067 */
3068#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld)              \
3069void helper_##op(CPUPPCState *env, uint32_t opcode,                     \
3070                 ppc_vsr_t *xt, ppc_vsr_t *xb)                          \
3071{                                                                       \
3072    ppc_vsr_t t = *xt;                                                  \
3073                                                                        \
3074    t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);                 \
3075    helper_compute_fprf_##ttp(env, t.tfld);                             \
3076                                                                        \
3077    *xt = t;                                                            \
3078    do_float_check_status(env, GETPC());                                \
3079}
3080
3081VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
3082VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
3083
3084/*
3085 * For "use current rounding mode", define a value that will not be
3086 * one of the existing rounding mode enums.
3087 */
3088#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
3089  float_round_up + float_round_to_zero)
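    /*
     * Summing four distinct non-negative enumerators yields a value larger
     * than any of them; with the softfloat numbering at the time of
     * writing (0 through 3) the sum is 6, which is also clear of
     * float_round_ties_away and float_round_to_odd.
     */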
3090
3091/*
3092 * VSX_ROUND - VSX floating point round
3093 *   op    - instruction mnemonic
3094 *   nels  - number of elements (1, 2 or 4)
3095 *   tp    - type (float32 or float64)
3096 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
3097 *   rmode - rounding mode
3098 *   sfprf - set FPRF
3099 */
3100#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf)                     \
3101void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)       \
3102{                                                                      \
3103    ppc_vsr_t t = *xt;                                                 \
3104    int i;                                                             \
3105                                                                       \
3106    if (rmode != FLOAT_ROUND_CURRENT) {                                \
3107        set_float_rounding_mode(rmode, &env->fp_status);               \
3108    }                                                                  \
3109                                                                       \
3110    for (i = 0; i < nels; i++) {                                       \
3111        if (unlikely(tp##_is_signaling_nan(xb->fld,                    \
3112                                           &env->fp_status))) {        \
3113            float_invalid_op_vxsnan(env, GETPC());                     \
3114            t.fld = tp##_snan_to_qnan(xb->fld);                        \
3115        } else {                                                       \
3116            t.fld = tp##_round_to_int(xb->fld, &env->fp_status);       \
3117        }                                                              \
3118        if (sfprf) {                                                   \
3119            helper_compute_fprf_float64(env, t.fld);                   \
3120        }                                                              \
3121    }                                                                  \
3122                                                                       \
3123    /*                                                                 \
3124     * If this is not a "use current rounding mode" instruction,       \
3125     * then inhibit setting of the XX bit and restore rounding         \
3126     * mode from FPSCR                                                 \
3127     */                                                                \
3128    if (rmode != FLOAT_ROUND_CURRENT) {                                \
3129        fpscr_set_rounding_mode(env);                                  \
3130        env->fp_status.float_exception_flags &= ~float_flag_inexact;   \
3131    }                                                                  \
3132                                                                       \
3133    *xt = t;                                                           \
3134    do_float_check_status(env, GETPC());                               \
3135}
3136
3137VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
3138VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
3139VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
3140VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
3141VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)
3142
3143VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
3144VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
3145VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
3146VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
3147VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)
3148
3149VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
3150VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
3151VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
3152VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
3153VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
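    /*
     * Mnemonic suffixes for the round-to-integer forms above: plain "i"
     * rounds to nearest with ties away from zero, "ic" uses the current
     * FPSCR rounding mode (and is the only form that can set XX), "im"
     * rounds toward -infinity, "ip" toward +infinity and "iz" toward
     * zero.
     */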
3154
3155uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
3156{
3157    helper_reset_fpstatus(env);
3158
3159    uint64_t xt = helper_frsp(env, xb);
3160
3161    helper_compute_fprf_float64(env, xt);
3162    do_float_check_status(env, GETPC());
3163    return xt;
3164}
3165
3166#define VSX_XXPERM(op, indexed)                                       \
3167void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                     \
3168                 ppc_vsr_t *xa, ppc_vsr_t *pcv)                       \
3169{                                                                     \
3170    ppc_vsr_t t = *xt;                                                \
3171    int i, idx;                                                       \
3172                                                                      \
3173    for (i = 0; i < 16; i++) {                                        \
3174        idx = pcv->VsrB(i) & 0x1F;                                    \
3175        if (indexed) {                                                \
3176            idx = 31 - idx;                                           \
3177        }                                                             \
3178        t.VsrB(i) = (idx <= 15) ? xa->VsrB(idx)                       \
3179                                : xt->VsrB(idx - 16);                 \
3180    }                                                                 \
3181    *xt = t;                                                          \
3182}
3183
3184VSX_XXPERM(xxperm, 0)
3185VSX_XXPERM(xxpermr, 1)
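    /*
     * For xxperm the low five bits of each control byte index into the
     * 32-byte concatenation of xa (indices 0-15) and the original xt
     * (indices 16-31); xxpermr uses the complemented index 31 - idx, so
     * it selects the same bytes counted from the opposite end.
     */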
3186
3187void helper_xvxsigsp(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)
3188{
3189    ppc_vsr_t t = { };
3190    uint32_t exp, i, fraction;
3191
3192    for (i = 0; i < 4; i++) {
3193        exp = (xb->VsrW(i) >> 23) & 0xFF;
3194        fraction = xb->VsrW(i) & 0x7FFFFF;
3195        if (exp != 0 && exp != 255) {
3196            t.VsrW(i) = fraction | 0x00800000;
3197        } else {
3198            t.VsrW(i) = fraction;
3199        }
3200    }
3201    *xt = t;
3202}
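    /*
     * xvxsigsp extracts the significand of each single-precision word:
     * normal numbers get the implicit bit (0x00800000) ORed back in,
     * while zeros, denormals, infinities and NaNs (exponent 0 or 255)
     * yield just the raw fraction bits.
     */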
3203
3204/*
3205 * VSX_TEST_DC - VSX floating point test data class
3206 *   op    - instruction mnemonic
3207 *   nels  - number of elements (1, 2 or 4)
3208 *   xbn   - VSR register number
3209 *   tp    - type (float32 or float64)
3210 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
3211 *   tfld  - target vsr_t field (VsrD(*) or VsrW(*))
3212 *   fld_max - target field max
3213 *   scrf - set result in CR and FPCC
3214 */
3215#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf)  \
3216void helper_##op(CPUPPCState *env, uint32_t opcode)         \
3217{                                                           \
3218    ppc_vsr_t *xt = &env->vsr[xT(opcode)];                  \
3219    ppc_vsr_t *xb = &env->vsr[xbn];                         \
3220    ppc_vsr_t t = { };                                      \
3221    uint32_t i, sign, dcmx;                                 \
3222    uint32_t cc, match = 0;                                 \
3223                                                            \
3224    if (!scrf) {                                            \
3225        dcmx = DCMX_XV(opcode);                             \
3226    } else {                                                \
3227        t = *xt;                                            \
3228        dcmx = DCMX(opcode);                                \
3229    }                                                       \
3230                                                            \
3231    for (i = 0; i < nels; i++) {                            \
3232        sign = tp##_is_neg(xb->fld);                        \
3233        if (tp##_is_any_nan(xb->fld)) {                     \
3234            match = extract32(dcmx, 6, 1);                  \
3235        } else if (tp##_is_infinity(xb->fld)) {             \
3236            match = extract32(dcmx, 4 + !sign, 1);          \
3237        } else if (tp##_is_zero(xb->fld)) {                 \
3238            match = extract32(dcmx, 2 + !sign, 1);          \
3239        } else if (tp##_is_zero_or_denormal(xb->fld)) {     \
3240            match = extract32(dcmx, 0 + !sign, 1);          \
3241        }                                                   \
3242                                                            \
3243        if (scrf) {                                         \
3244            cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT;  \
3245            env->fpscr &= ~FP_FPCC;                         \
3246            env->fpscr |= cc << FPSCR_FPCC;                 \
3247            env->crf[BF(opcode)] = cc;                      \
3248        } else {                                            \
3249            t.tfld = match ? fld_max : 0;                   \
3250        }                                                   \
3251        match = 0;                                          \
3252    }                                                       \
3253    if (!scrf) {                                            \
3254        *xt = t;                                            \
3255    }                                                       \
3256}
3257
3258VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0)
3259VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
3260VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
3261VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)
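    /*
     * As used above, the DCMX bits select the data classes to match:
     * bit 6 NaN, bit 5 +Infinity, bit 4 -Infinity, bit 3 +Zero,
     * bit 2 -Zero, bit 1 +Denormal, bit 0 -Denormal.  The vector forms
     * write an all-ones element on a match; the scalar forms set the CR
     * field and FPCC instead.
     */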
3262
3263void helper_xststdcsp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb)
3264{
3265    uint32_t dcmx, sign, exp;
3266    uint32_t cc, match = 0, not_sp = 0;
3267
3268    dcmx = DCMX(opcode);
3269    exp = (xb->VsrD(0) >> 52) & 0x7FF;
3270
3271    sign = float64_is_neg(xb->VsrD(0));
3272    if (float64_is_any_nan(xb->VsrD(0))) {
3273        match = extract32(dcmx, 6, 1);
3274    } else if (float64_is_infinity(xb->VsrD(0))) {
3275        match = extract32(dcmx, 4 + !sign, 1);
3276    } else if (float64_is_zero(xb->VsrD(0))) {
3277        match = extract32(dcmx, 2 + !sign, 1);
3278    } else if (float64_is_zero_or_denormal(xb->VsrD(0)) ||
3279               (exp > 0 && exp < 0x381)) {
3280        match = extract32(dcmx, 0 + !sign, 1);
3281    }
3282
3283    not_sp = !float64_eq(xb->VsrD(0),
3284                         float32_to_float64(
3285                             float64_to_float32(xb->VsrD(0), &env->fp_status),
3286                             &env->fp_status), &env->fp_status);
3287
3288    cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
3289    env->fpscr &= ~FP_FPCC;
3290    env->fpscr |= cc << FPSCR_FPCC;
3291    env->crf[BF(opcode)] = cc;
3292}
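    /*
     * In addition to the usual data-class bits, xststdcsp reports in the
     * SO position of the CR result whether the double-precision operand
     * is not exactly representable in single precision, as determined by
     * the float64 -> float32 -> float64 round trip above; operands with a
     * biased exponent below 0x381 are treated as denormal for SP
     * purposes.
     */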
3293
3294void helper_xsrqpi(CPUPPCState *env, uint32_t opcode,
3295                   ppc_vsr_t *xt, ppc_vsr_t *xb)
3296{
3297    ppc_vsr_t t = { };
3298    uint8_t r = Rrm(opcode);
3299    uint8_t ex = Rc(opcode);
3300    uint8_t rmc = RMC(opcode);
3301    uint8_t rmode = 0;
3302    float_status tstat;
3303
3304    helper_reset_fpstatus(env);
3305
3306    if (r == 0 && rmc == 0) {
3307        rmode = float_round_ties_away;
3308    } else if (r == 0 && rmc == 0x3) {
3309        rmode = fpscr_rn;
3310    } else if (r == 1) {
3311        switch (rmc) {
3312        case 0:
3313            rmode = float_round_nearest_even;
3314            break;
3315        case 1:
3316            rmode = float_round_to_zero;
3317            break;
3318        case 2:
3319            rmode = float_round_up;
3320            break;
3321        case 3:
3322            rmode = float_round_down;
3323            break;
3324        default:
3325            abort();
3326        }
3327    }
3328
3329    tstat = env->fp_status;
3330    set_float_exception_flags(0, &tstat);
3331    set_float_rounding_mode(rmode, &tstat);
3332    t.f128 = float128_round_to_int(xb->f128, &tstat);
3333    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3334
3335    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3336        if (float128_is_signaling_nan(xb->f128, &tstat)) {
3337            float_invalid_op_vxsnan(env, GETPC());
3338            t.f128 = float128_snan_to_qnan(t.f128);
3339        }
3340    }
3341
3342    if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
3343        env->fp_status.float_exception_flags &= ~float_flag_inexact;
3344    }
3345
3346    helper_compute_fprf_float128(env, t.f128);
3347    do_float_check_status(env, GETPC());
3348    *xt = t;
3349}
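    /*
     * The ex bit above (Rc in the opcode) distinguishes xsrqpi from
     * xsrqpix: when it is clear, the inexact flag raised by the rounding
     * is discarded, so only the "x" form can set XX.
     */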
3350
3351void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode,
3352                    ppc_vsr_t *xt, ppc_vsr_t *xb)
3353{
3354    ppc_vsr_t t = { };
3355    uint8_t r = Rrm(opcode);
3356    uint8_t rmc = RMC(opcode);
3357    uint8_t rmode = 0;
3358    floatx80 round_res;
3359    float_status tstat;
3360
3361    helper_reset_fpstatus(env);
3362
3363    if (r == 0 && rmc == 0) {
3364        rmode = float_round_ties_away;
3365    } else if (r == 0 && rmc == 0x3) {
3366        rmode = fpscr_rn;
3367    } else if (r == 1) {
3368        switch (rmc) {
3369        case 0:
3370            rmode = float_round_nearest_even;
3371            break;
3372        case 1:
3373            rmode = float_round_to_zero;
3374            break;
3375        case 2:
3376            rmode = float_round_up;
3377            break;
3378        case 3:
3379            rmode = float_round_down;
3380            break;
3381        default:
3382            abort();
3383        }
3384    }
3385
3386    tstat = env->fp_status;
3387    set_float_exception_flags(0, &tstat);
3388    set_float_rounding_mode(rmode, &tstat);
3389    round_res = float128_to_floatx80(xb->f128, &tstat);
3390    t.f128 = floatx80_to_float128(round_res, &tstat);
3391    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3392
3393    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3394        if (float128_is_signaling_nan(xb->f128, &tstat)) {
3395            float_invalid_op_vxsnan(env, GETPC());
3396            t.f128 = float128_snan_to_qnan(t.f128);
3397        }
3398    }
3399
3400    helper_compute_fprf_float128(env, t.f128);
3401    *xt = t;
3402    do_float_check_status(env, GETPC());
3403}
3404
3405void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode,
3406                     ppc_vsr_t *xt, ppc_vsr_t *xb)
3407{
3408    ppc_vsr_t t = { };
3409    float_status tstat;
3410
3411    helper_reset_fpstatus(env);
3412
3413    tstat = env->fp_status;
3414    if (unlikely(Rc(opcode) != 0)) {
3415        tstat.float_rounding_mode = float_round_to_odd;
3416    }
3417
3418    set_float_exception_flags(0, &tstat);
3419    t.f128 = float128_sqrt(xb->f128, &tstat);
3420    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3421
3422    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3423        if (float128_is_signaling_nan(xb->f128, &tstat)) {
3424            float_invalid_op_vxsnan(env, GETPC());
3425            t.f128 = float128_snan_to_qnan(xb->f128);
3426        } else if (float128_is_quiet_nan(xb->f128, &tstat)) {
3427            t.f128 = xb->f128;
3428        } else if (float128_is_neg(xb->f128) && !float128_is_zero(xb->f128)) {
3429            float_invalid_op_vxsqrt(env, 1, GETPC());
3430            t.f128 = float128_default_nan(&env->fp_status);
3431        }
3432    }
3433
3434    helper_compute_fprf_float128(env, t.f128);
3435    *xt = t;
3436    do_float_check_status(env, GETPC());
3437}
3438
3439void helper_xssubqp(CPUPPCState *env, uint32_t opcode,
3440                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
3441{
3442    ppc_vsr_t t = *xt;
3443    float_status tstat;
3444
3445    helper_reset_fpstatus(env);
3446
3447    tstat = env->fp_status;
3448    if (unlikely(Rc(opcode) != 0)) {
3449        tstat.float_rounding_mode = float_round_to_odd;
3450    }
3451
3452    set_float_exception_flags(0, &tstat);
3453    t.f128 = float128_sub(xa->f128, xb->f128, &tstat);
3454    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3455
3456    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3457        float_invalid_op_addsub(env, 1, GETPC(),
3458                                float128_classify(xa->f128) |
3459                                float128_classify(xb->f128));
3460    }
3461
3462    helper_compute_fprf_float128(env, t.f128);
3463    *xt = t;
3464    do_float_check_status(env, GETPC());
3465}
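    /*
     * The Rc(opcode) tests in the quad-precision helpers above (e.g.
     * xscvqpdp, xssqrtqp, xssubqp) select the round-to-odd "o" forms of
     * those instructions, such as xscvqpdpo, xssqrtqpo and xssubqpo.
     */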
3466