/* qemu/target/ppc/fpu_helper.c */
/*
 *  PowerPC floating point and SPE emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "internal.h"
#include "fpu/softfloat.h"
  25
  26static inline float128 float128_snan_to_qnan(float128 x)
  27{
  28    float128 r;
  29
  30    r.high = x.high | 0x0000800000000000;
  31    r.low = x.low;
  32    return r;
  33}
  34
/* Quiet an sNaN of each narrower format by setting the fraction msb. */
#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
#define float16_snan_to_qnan(x) ((x) | 0x0200)
  38
/*
 * Whether FP exceptions may actually be delivered to the guest:
 * user-mode emulation always delivers them; in system emulation
 * delivery requires MSR[FE0] or MSR[FE1] to be set.
 */
static inline bool fp_exceptions_enabled(CPUPPCState *env)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    return (env->msr & ((1U << MSR_FE0) | (1U << MSR_FE1))) != 0;
#endif
}
  47
/*****************************************************************************/
/* Floating point operations helpers */
  50
/*
 * This is the non-arithmetic conversion that happens e.g. on loads.
 * In the Power ISA pseudocode, this is called DOUBLE.
 */
uint64_t helper_todouble(uint32_t arg)
{
    uint32_t abs_arg = arg & 0x7fffffff;
    uint64_t ret;

    if (likely(abs_arg >= 0x00800000)) {
        if (unlikely(extract32(arg, 23, 8) == 0xff)) {
            /* Inf or NAN.  */
            ret  = (uint64_t)extract32(arg, 31, 1) << 63;
            ret |= (uint64_t)0x7ff << 52;
            ret |= (uint64_t)extract32(arg, 0, 23) << 29;
        } else {
            /* Normalized operand.  */
            ret  = (uint64_t)extract32(arg, 30, 2) << 62;
            /* Fill exponent bits 59..61 with the complement of the msb. */
            ret |= ((extract32(arg, 30, 1) ^ 1) * (uint64_t)7) << 59;
            ret |= (uint64_t)extract32(arg, 0, 30) << 29;
        }
    } else {
        /* Zero or Denormalized operand.  */
        ret = (uint64_t)extract32(arg, 31, 1) << 63;
        if (unlikely(abs_arg != 0)) {
            /*
             * Denormalized operand.
             * Shift fraction so that the msb is in the implicit bit position.
             * Thus, shift is in the range [1:23].
             */
            int shift = clz32(abs_arg) - 8;
            /*
             * The first 3 terms compute the float64 exponent.  We then bias
             * this result by -1 so that we can swallow the implicit bit below.
             */
            int exp = -126 - shift + 1023 - 1;

            ret |= (uint64_t)exp << 52;
            /* '+=': the implicit bit carries into the biased exponent. */
            ret += (uint64_t)abs_arg << (52 - 23 + shift);
        }
    }
    return ret;
}
  94
/*
 * This is the non-arithmetic conversion that happens e.g. on stores.
 * In the Power ISA pseudocode, this is called SINGLE.
 */
uint32_t helper_tosingle(uint64_t arg)
{
    int exp = extract64(arg, 52, 11);
    uint32_t ret;

    /* 896 = 1023 - 127: the bias difference between double and single. */
    if (likely(exp > 896)) {
        /* No denormalization required (includes Inf, NaN).  */
        ret  = extract64(arg, 62, 2) << 30;
        ret |= extract64(arg, 29, 30);
    } else {
        /*
         * Zero or Denormal result.  If the exponent is in bounds for
         * a single-precision denormal result, extract the proper
         * bits.  If the input is not zero, and the exponent is out of
         * bounds, then the result is undefined; this underflows to
         * zero.
         */
        ret = extract64(arg, 63, 1) << 31;
        /* exp >= 874 keeps the shift count (896 + 30 - exp) within [30,52]. */
        if (unlikely(exp >= 874)) {
            /* Denormal result.  */
            ret |= ((1ULL << 52) | extract64(arg, 0, 52)) >> (896 + 30 - exp);
        }
    }
    return ret;
}
 124
 125static inline int ppc_float32_get_unbiased_exp(float32 f)
 126{
 127    return ((f >> 23) & 0xFF) - 127;
 128}
 129
 130static inline int ppc_float64_get_unbiased_exp(float64 f)
 131{
 132    return ((f >> 52) & 0x7FF) - 1023;
 133}
 134
/*
 * Classify a floating-point number.  These are bit flags so that a
 * class plus the sign can be OR'ed together (see tp##_classify).
 */
enum {
    is_normal   = 1,
    is_zero     = 2,
    is_denormal = 4,
    is_inf      = 8,
    is_qnan     = 16,
    is_snan     = 32,
    is_neg      = 64,  /* sign flag, combined with exactly one class above */
};
 145
/*
 * Generate tp##_classify(): map a value of float type 'tp' to exactly one
 * class flag from the enum above, OR'ed with is_neg for negative values.
 */
#define COMPUTE_CLASS(tp)                                      \
static int tp##_classify(tp arg)                               \
{                                                              \
    int ret = tp##_is_neg(arg) * is_neg;                       \
    if (unlikely(tp##_is_any_nan(arg))) {                      \
        float_status dummy = { };  /* snan_bit_is_one = 0 */   \
        ret |= (tp##_is_signaling_nan(arg, &dummy)             \
                ? is_snan : is_qnan);                          \
    } else if (unlikely(tp##_is_infinity(arg))) {              \
        ret |= is_inf;                                         \
    } else if (tp##_is_zero(arg)) {                            \
        ret |= is_zero;                                        \
    } else if (tp##_is_zero_or_denormal(arg)) {                \
        ret |= is_denormal;                                    \
    } else {                                                   \
        ret |= is_normal;                                      \
    }                                                          \
    return ret;                                                \
}

COMPUTE_CLASS(float16)
COMPUTE_CLASS(float32)
COMPUTE_CLASS(float64)
COMPUTE_CLASS(float128)
 170
/*
 * Update FPSCR[FPRF] from a classification computed by tp##_classify().
 * The table is indexed by ctz32(class flag) (is_normal..is_snan map to
 * rows 0..5) and by the sign.
 */
static void set_fprf_from_class(CPUPPCState *env, int class)
{
    static const uint8_t fprf[6][2] = {
        { 0x04, 0x08 },  /* normalized */
        { 0x02, 0x12 },  /* zero */
        { 0x14, 0x18 },  /* denormalized */
        { 0x05, 0x09 },  /* infinity */
        { 0x11, 0x11 },  /* qnan */
        { 0x00, 0x00 },  /* snan -- flags are undefined */
    };
    bool isneg = class & is_neg;

    env->fpscr &= ~FP_FPRF;
    env->fpscr |= fprf[ctz32(class)][isneg] << FPSCR_FPRF;
}
 186
/* Generate helper_compute_fprf_<tp>() for each supported float width. */
#define COMPUTE_FPRF(tp)                                \
void helper_compute_fprf_##tp(CPUPPCState *env, tp arg) \
{                                                       \
    set_fprf_from_class(env, tp##_classify(arg));       \
}

COMPUTE_FPRF(float16)
COMPUTE_FPRF(float32)
COMPUTE_FPRF(float64)
COMPUTE_FPRF(float128)
 197
/*
 * Floating-point invalid operations exception: set the VX/FX summary
 * bits and, when VE is set and exceptions are deliverable, raise a
 * program interrupt carrying 'op' in the error code.
 */
static void finish_invalid_op_excp(CPUPPCState *env, int op, uintptr_t retaddr)
{
    /* Update the floating-point invalid operation summary */
    env->fpscr |= FP_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | op, retaddr);
        }
    }
}
 214
 215static void finish_invalid_op_arith(CPUPPCState *env, int op,
 216                                    bool set_fpcc, uintptr_t retaddr)
 217{
 218    env->fpscr &= ~(FP_FR | FP_FI);
 219    if (fpscr_ve == 0) {
 220        if (set_fpcc) {
 221            env->fpscr &= ~FP_FPCC;
 222            env->fpscr |= (FP_C | FP_FU);
 223        }
 224    }
 225    finish_invalid_op_excp(env, op, retaddr);
 226}
 227
/* Signalling NaN operand (VXSNAN). */
static void float_invalid_op_vxsnan(CPUPPCState *env, uintptr_t retaddr)
{
    env->fpscr |= FP_VXSNAN;
    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, retaddr);
}
 234
/* Magnitude subtraction of infinities (VXISI), e.g. inf - inf. */
static void float_invalid_op_vxisi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXISI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXISI, set_fpcc, retaddr);
}
 242
/* Division of infinity by infinity (VXIDI). */
static void float_invalid_op_vxidi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXIDI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIDI, set_fpcc, retaddr);
}
 250
/* Division of zero by zero (VXZDZ). */
static void float_invalid_op_vxzdz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXZDZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXZDZ, set_fpcc, retaddr);
}
 258
/* Multiplication of zero by infinity (VXIMZ). */
static void float_invalid_op_vximz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXIMZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIMZ, set_fpcc, retaddr);
}
 266
/* Square root of a negative number (VXSQRT). */
static void float_invalid_op_vxsqrt(CPUPPCState *env, bool set_fpcc,
                                    uintptr_t retaddr)
{
    env->fpscr |= FP_VXSQRT;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXSQRT, set_fpcc, retaddr);
}
 274
/*
 * Ordered comparison of NaN (VXVC).  Unlike the other invalid-op
 * helpers, the enabled exception is only recorded here (deferred) and
 * delivered later by do_float_check_status(); 'retaddr' is unused.
 */
static void float_invalid_op_vxvc(CPUPPCState *env, bool set_fpcc,
                                  uintptr_t retaddr)
{
    env->fpscr |= FP_VXVC;
    if (set_fpcc) {
        /* Record "unordered" in the condition field. */
        env->fpscr &= ~FP_FPCC;
        env->fpscr |= (FP_C | FP_FU);
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= FP_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    /* We must update the target FPR before raising the exception */
    if (fpscr_ve != 0) {
        CPUState *cs = env_cpu(env);

        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* Exception is deferred */
    }
}
 299
 300/* Invalid conversion */
 301static void float_invalid_op_vxcvi(CPUPPCState *env, bool set_fpcc,
 302                                   uintptr_t retaddr)
 303{
 304    env->fpscr |= FP_VXCVI;
 305    env->fpscr &= ~(FP_FR | FP_FI);
 306    if (fpscr_ve == 0) {
 307        if (set_fpcc) {
 308            env->fpscr &= ~FP_FPCC;
 309            env->fpscr |= (FP_C | FP_FU);
 310        }
 311    }
 312    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, retaddr);
 313}
 314
/* Zero-divide exception (ZX): raise immediately when ZE is set. */
static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
{
    env->fpscr |= FP_ZX;
    env->fpscr &= ~(FP_FR | FP_FI);
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
                                   raddr);
        }
    }
}
 331
/*
 * Overflow exception (OX).  When enabled, the program interrupt is only
 * recorded here and delivered later (the target FPR must be written
 * first); when disabled, XX/FI are set as well.
 */
static inline void float_overflow_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= FP_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= FP_XX;
        env->fpscr |= FP_FI;
    }
}
 351
/* Underflow exception (UX); deferred like the overflow case above. */
static inline void float_underflow_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= FP_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}
 368
/* Inexact exception (XX); deferred like the overflow case above. */
static inline void float_inexact_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= FP_FI;
    env->fpscr |= FP_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
 385
 386static inline void fpscr_set_rounding_mode(CPUPPCState *env)
 387{
 388    int rnd_type;
 389
 390    /* Set rounding mode */
 391    switch (fpscr_rn) {
 392    case 0:
 393        /* Best approximation (round to nearest) */
 394        rnd_type = float_round_nearest_even;
 395        break;
 396    case 1:
 397        /* Smaller magnitude (round toward zero) */
 398        rnd_type = float_round_to_zero;
 399        break;
 400    case 2:
 401        /* Round toward +infinite */
 402        rnd_type = float_round_up;
 403        break;
 404    default:
 405    case 3:
 406        /* Round toward -infinite */
 407        rnd_type = float_round_down;
 408        break;
 409    }
 410    set_float_rounding_mode(rnd_type, &env->fp_status);
 411}
 412
/*
 * Clear one FPSCR bit (mtfsb0).  Clearing certain bits requires the
 * derived summary bits (VX, FEX) or the rounding mode to be recomputed.
 */
void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN0:
            /* The RN field changed: resync softfloat's rounding mode. */
            fpscr_set_rounding_mode(env);
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            if (!fpscr_ix) {
                /* No invalid-op cause remains: clear the VX summary bit */
                env->fpscr &= ~FP_VX;
            }
            break;
        case FPSCR_OX:
        case FPSCR_UX:
        case FPSCR_ZX:
        case FPSCR_XX:
        case FPSCR_VE:
        case FPSCR_OE:
        case FPSCR_UE:
        case FPSCR_ZE:
        case FPSCR_XE:
            if (!fpscr_eex) {
                /* No enabled exception remains: clear the FEX bit */
                env->fpscr &= ~FP_FEX;
            }
            break;
        default:
            break;
        }
    }
}
 458
/*
 * Set one FPSCR bit (mtfsb1).  Setting a status bit whose matching
 * enable bit is on -- or vice versa -- records a deferred program
 * interrupt (Rc1 must be updated before the exception is taken);
 * setting RN resyncs the softfloat rounding mode.
 */
void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
{
    CPUState *cs = env_cpu(env);
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= FP_FX;
            if (fpscr_ve) {
                goto raise_ve;
            }
            break;
        case FPSCR_OX:
            env->fpscr |= FP_FX;
            if (fpscr_oe) {
                goto raise_oe;
            }
            break;
        case FPSCR_UX:
            env->fpscr |= FP_FX;
            if (fpscr_ue) {
                goto raise_ue;
            }
            break;
        case FPSCR_ZX:
            env->fpscr |= FP_FX;
            if (fpscr_ze) {
                goto raise_ze;
            }
            break;
        case FPSCR_XX:
            env->fpscr |= FP_FX;
            if (fpscr_xe) {
                goto raise_xe;
            }
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            /* Any invalid-op cause also sets the VX summary bit. */
            env->fpscr |= FP_VX;
            env->fpscr |= FP_FX;
            if (fpscr_ve != 0) {
                goto raise_ve;
            }
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                /* Collect every pending invalid-op cause in error_code. */
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan) {
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                }
                if (fpscr_vxisi) {
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                }
                if (fpscr_vxidi) {
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                }
                if (fpscr_vxzdz) {
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                }
                if (fpscr_vximz) {
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                }
                if (fpscr_vxvc) {
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                }
                if (fpscr_vxsoft) {
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                }
                if (fpscr_vxsqrt) {
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                }
                if (fpscr_vxcvi) {
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                }
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN0:
            /* The RN field changed: resync softfloat's rounding mode. */
            fpscr_set_rounding_mode(env);
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= FP_FEX;
            /* We have to update Rc1 before raising the exception */
            cs->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
 590
/*
 * Store to FPSCR under a per-nibble mask (mtfsf).  FEX and VX cannot be
 * written directly: they are preserved across the store and recomputed
 * from the sticky exception and enable bits afterwards.  A pending
 * enabled exception is recorded as a deferred program interrupt.
 */
void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    CPUState *cs = env_cpu(env);
    target_ulong prev, new;
    int i;

    prev = env->fpscr;
    new = (target_ulong)arg;
    /* FEX and VX are derived; keep the old values for now. */
    new &= ~(FP_FEX | FP_VX);
    new |= prev & (FP_FEX | FP_VX);
    /* Each mask bit selects one 4-bit nibble of the FPSCR. */
    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xFLL << (4 * i));
            env->fpscr |= new & (0xFLL << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0) {
        env->fpscr |= FP_VX;
    } else {
        env->fpscr &= ~FP_VX;
    }
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= FP_FEX;
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~FP_FEX;
    }
    /* The RN field may have changed: resync softfloat. */
    fpscr_set_rounding_mode(env);
}
 623
/*
 * Plain-function wrapper around helper_store_fpscr for callers outside
 * TCG-generated code.  NOTE(review): callers are not visible here.
 */
void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    helper_store_fpscr(env, arg, mask);
}
 628
/*
 * Fold the accumulated softfloat exception flags into the FPSCR and
 * deliver any deferred enabled exception.  'raddr' is the host return
 * address used to unwind the guest state when raising.
 */
static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);
    int status = get_float_exception_flags(&env->fp_status);

    if (status & float_flag_overflow) {
        float_overflow_excp(env);
    } else if (status & float_flag_underflow) {
        float_underflow_excp(env);
    }
    if (status & float_flag_inexact) {
        float_inexact_excp(env);
    } else {
        env->fpscr &= ~FP_FI; /* clear the FPSCR[FI] bit */
    }

    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, cs->exception_index,
                                   env->error_code, raddr);
        }
    }
}
 654
/* TCG entry point; GETPC() must be taken in the outermost helper. */
void helper_float_check_status(CPUPPCState *env)
{
    do_float_check_status(env, GETPC());
}
 659
/* Clear the accumulated softfloat exception flags before an FP op. */
void helper_reset_fpstatus(CPUPPCState *env)
{
    set_float_exception_flags(0, &env->fp_status);
}
 664
 665static void float_invalid_op_addsub(CPUPPCState *env, bool set_fpcc,
 666                                    uintptr_t retaddr, int classes)
 667{
 668    if ((classes & ~is_neg) == is_inf) {
 669        /* Magnitude subtraction of infinities */
 670        float_invalid_op_vxisi(env, set_fpcc, retaddr);
 671    } else if (classes & is_snan) {
 672        float_invalid_op_vxsnan(env, retaddr);
 673    }
 674}
 675
/* fadd - fadd. */
float64 helper_fadd(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_add(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        /* Classify both operands to tell VXISI apart from VXSNAN. */
        float_invalid_op_addsub(env, 1, GETPC(),
                                float64_classify(arg1) |
                                float64_classify(arg2));
    }

    return ret;
}
 690
/* fsub - fsub. */
float64 helper_fsub(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_sub(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        /* Classify both operands to tell VXISI apart from VXSNAN. */
        float_invalid_op_addsub(env, 1, GETPC(),
                                float64_classify(arg1) |
                                float64_classify(arg2));
    }

    return ret;
}
 705
 706static void float_invalid_op_mul(CPUPPCState *env, bool set_fprc,
 707                                 uintptr_t retaddr, int classes)
 708{
 709    if ((classes & (is_zero | is_inf)) == (is_zero | is_inf)) {
 710        /* Multiplication of zero by infinity */
 711        float_invalid_op_vximz(env, set_fprc, retaddr);
 712    } else if (classes & is_snan) {
 713        float_invalid_op_vxsnan(env, retaddr);
 714    }
 715}
 716
/* fmul - fmul. */
float64 helper_fmul(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_mul(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        /* Classify both operands to tell VXIMZ apart from VXSNAN. */
        float_invalid_op_mul(env, 1, GETPC(),
                             float64_classify(arg1) |
                             float64_classify(arg2));
    }

    return ret;
}
 731
/*
 * Pick the invalid-op cause for a divide from the OR of both operand
 * classes: inf/inf -> VXIDI, 0/0 -> VXZDZ, otherwise an sNaN operand.
 */
static void float_invalid_op_div(CPUPPCState *env, bool set_fprc,
                                 uintptr_t retaddr, int classes)
{
    classes &= ~is_neg;
    if (classes == is_inf) {
        /* Division of infinity by infinity */
        float_invalid_op_vxidi(env, set_fprc, retaddr);
    } else if (classes == is_zero) {
        /* Division of zero by zero */
        float_invalid_op_vxzdz(env, set_fprc, retaddr);
    } else if (classes & is_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}
 746
/* fdiv - fdiv. */
float64 helper_fdiv(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_div(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status)) {
        /* A divide can raise both invalid and zero-divide conditions. */
        if (status & float_flag_invalid) {
            float_invalid_op_div(env, 1, GETPC(),
                                 float64_classify(arg1) |
                                 float64_classify(arg2));
        }
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp(env, GETPC());
        }
    }

    return ret;
}
 766
/* Invalid integer conversion: VXCVI, plus VXSNAN if the source is sNaN. */
static void float_invalid_cvt(CPUPPCState *env, bool set_fprc,
                              uintptr_t retaddr, int class1)
{
    float_invalid_op_vxcvi(env, set_fprc, retaddr);
    if (class1 & is_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}
 775
 776#define FPU_FCTI(op, cvt, nanval)                                      \
 777uint64_t helper_##op(CPUPPCState *env, float64 arg)                    \
 778{                                                                      \
 779    uint64_t ret = float64_to_##cvt(arg, &env->fp_status);             \
 780    int status = get_float_exception_flags(&env->fp_status);           \
 781                                                                       \
 782    if (unlikely(status)) {                                            \
 783        if (status & float_flag_invalid) {                             \
 784            float_invalid_cvt(env, 1, GETPC(), float64_classify(arg)); \
 785            ret = nanval;                                              \
 786        }                                                              \
 787        do_float_check_status(env, GETPC());                           \
 788    }                                                                  \
 789    return ret;                                                        \
 790}
 791
 792FPU_FCTI(fctiw, int32, 0x80000000U)
 793FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
 794FPU_FCTI(fctiwu, uint32, 0x00000000U)
 795FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
 796FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
 797FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
 798FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
 799FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
 800
/*
 * Generate the integer -> float64 conversion helpers (fcfid*).  The
 * single-precision variants convert to float32 first and then widen,
 * so the value held in the FPR is correctly rounded to single.
 */
#define FPU_FCFI(op, cvtr, is_single)                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)       \
{                                                          \
    CPU_DoubleU farg;                                      \
                                                           \
    if (is_single) {                                       \
        float32 tmp = cvtr(arg, &env->fp_status);          \
        farg.d = float32_to_float64(tmp, &env->fp_status); \
    } else {                                               \
        farg.d = cvtr(arg, &env->fp_status);               \
    }                                                      \
    do_float_check_status(env, GETPC());                   \
    return farg.ll;                                        \
}

FPU_FCFI(fcfid, int64_to_float64, 0)
FPU_FCFI(fcfids, int64_to_float32, 1)
FPU_FCFI(fcfidu, uint64_to_float64, 0)
FPU_FCFI(fcfidus, uint64_to_float32, 1)
 820
/*
 * Round to integer (fri*) using an explicit rounding mode instead of
 * FPSCR[RN].  sNaN inputs are quieted and raise VXSNAN.  fri* never
 * sets FPSCR[XX], so an inexact flag newly raised by the rounding is
 * discarded (but one that was already pending is kept).
 */
static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
                              int rounding_mode)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN round */
        float_invalid_op_vxsnan(env, GETPC());
        farg.ll = arg | 0x0008000000000000ULL;
    } else {
        /* Remember whether inexact was already pending. */
        int inexact = get_float_exception_flags(&env->fp_status) &
                      float_flag_inexact;
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode(env);

        /* fri* does not set FPSCR[XX] */
        if (!inexact) {
            env->fp_status.float_exception_flags &= ~float_flag_inexact;
        }
    }
    do_float_check_status(env, GETPC());
    return farg.ll;
}
 848
/* frin: round to integer, ties away from zero. */
uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_ties_away);
}
 853
/* friz: round to integer toward zero. */
uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_to_zero);
}
 858
/* frip: round to integer toward +infinity. */
uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_up);
}
 863
/* frim: round to integer toward -infinity. */
uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_down);
}
 868
/*
 * Generate the invalid-operation checker for fused multiply-add:
 * VXSNAN for any sNaN operand, VXIMZ for 0 * inf, and VXISI when the
 * product (a * b) and c are infinities whose effective signs differ
 * (taking a negated addend into account via madd_flags).
 */
#define FPU_MADDSUB_UPDATE(NAME, TP)                                    \
static void NAME(CPUPPCState *env, TP arg1, TP arg2, TP arg3,           \
                 unsigned int madd_flags, uintptr_t retaddr)            \
{                                                                       \
    if (TP##_is_signaling_nan(arg1, &env->fp_status) ||                 \
        TP##_is_signaling_nan(arg2, &env->fp_status) ||                 \
        TP##_is_signaling_nan(arg3, &env->fp_status)) {                 \
        /* sNaN operation */                                            \
        float_invalid_op_vxsnan(env, retaddr);                          \
    }                                                                   \
    if ((TP##_is_infinity(arg1) && TP##_is_zero(arg2)) ||               \
        (TP##_is_zero(arg1) && TP##_is_infinity(arg2))) {               \
        /* Multiplication of zero by infinity */                        \
        float_invalid_op_vximz(env, 1, retaddr);                        \
    }                                                                   \
    if ((TP##_is_infinity(arg1) || TP##_is_infinity(arg2)) &&           \
        TP##_is_infinity(arg3)) {                                       \
        uint8_t aSign, bSign, cSign;                                    \
                                                                        \
        aSign = TP##_is_neg(arg1);                                      \
        bSign = TP##_is_neg(arg2);                                      \
        cSign = TP##_is_neg(arg3);                                      \
        if (madd_flags & float_muladd_negate_c) {                       \
            cSign ^= 1;                                                 \
        }                                                               \
        /* Signs differ: this is a magnitude subtraction of infs. */    \
        if (aSign ^ bSign ^ cSign) {                                    \
            float_invalid_op_vxisi(env, 1, retaddr);                    \
        }                                                               \
    }                                                                   \
}
FPU_MADDSUB_UPDATE(float32_maddsub_update_excp, float32)
FPU_MADDSUB_UPDATE(float64_maddsub_update_excp, float64)
 901
/*
 * FPU_FMADD - generate a double-precision fused multiply-add helper:
 * computes arg1 * arg2 + arg3 with a single rounding.  madd_flags
 * selects negation of the addend and/or the result, yielding the
 * fmadd/fmsub/fnmadd/fnmsub variants.
 */
#define FPU_FMADD(op, madd_flags)                                       \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg1,                   \
                     uint64_t arg2, uint64_t arg3)                      \
{                                                                       \
    uint32_t flags;                                                     \
    float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags,          \
                                 &env->fp_status);                      \
    flags = get_float_exception_flags(&env->fp_status);                 \
    if (flags) {                                                        \
        if (flags & float_flag_invalid) {                               \
            float64_maddsub_update_excp(env, arg1, arg2, arg3,          \
                                        madd_flags, GETPC());           \
        }                                                               \
        do_float_check_status(env, GETPC());                            \
    }                                                                   \
    return ret;                                                         \
}

/* Negation flag combinations for the four fused variants. */
#define MADD_FLGS 0
#define MSUB_FLGS float_muladd_negate_c
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)

FPU_FMADD(fmadd, MADD_FLGS)
FPU_FMADD(fnmadd, NMADD_FLGS)
FPU_FMADD(fmsub, MSUB_FLGS)
FPU_FMADD(fnmsub, NMSUB_FLGS)
 929
 930/* frsp - frsp. */
 931uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
 932{
 933    CPU_DoubleU farg;
 934    float32 f32;
 935
 936    farg.ll = arg;
 937
 938    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
 939        float_invalid_op_vxsnan(env, GETPC());
 940    }
 941    f32 = float64_to_float32(farg.d, &env->fp_status);
 942    farg.d = float32_to_float64(f32, &env->fp_status);
 943
 944    return farg.ll;
 945}
 946
 947/* fsqrt - fsqrt. */
 948float64 helper_fsqrt(CPUPPCState *env, float64 arg)
 949{
 950    float64 ret = float64_sqrt(arg, &env->fp_status);
 951    int status = get_float_exception_flags(&env->fp_status);
 952
 953    if (unlikely(status & float_flag_invalid)) {
 954        if (unlikely(float64_is_any_nan(arg))) {
 955            if (unlikely(float64_is_signaling_nan(arg, &env->fp_status))) {
 956                /* sNaN square root */
 957                float_invalid_op_vxsnan(env, GETPC());
 958            }
 959        } else {
 960            /* Square root of a negative nonzero number */
 961            float_invalid_op_vxsqrt(env, 1, GETPC());
 962        }
 963    }
 964
 965    return ret;
 966}
 967
 968/* fre - fre. */
 969float64 helper_fre(CPUPPCState *env, float64 arg)
 970{
 971    /* "Estimate" the reciprocal with actual division.  */
 972    float64 ret = float64_div(float64_one, arg, &env->fp_status);
 973    int status = get_float_exception_flags(&env->fp_status);
 974
 975    if (unlikely(status)) {
 976        if (status & float_flag_invalid) {
 977            if (float64_is_signaling_nan(arg, &env->fp_status)) {
 978                /* sNaN reciprocal */
 979                float_invalid_op_vxsnan(env, GETPC());
 980            }
 981        }
 982        if (status & float_flag_divbyzero) {
 983            float_zero_divide_excp(env, GETPC());
 984            /* For FPSCR.ZE == 0, the result is 1/2.  */
 985            ret = float64_set_sign(float64_half, float64_is_neg(arg));
 986        }
 987    }
 988
 989    return ret;
 990}
 991
 992/* fres - fres. */
 993uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
 994{
 995    CPU_DoubleU farg;
 996    float32 f32;
 997
 998    farg.ll = arg;
 999
1000    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
1001        /* sNaN reciprocal */
1002        float_invalid_op_vxsnan(env, GETPC());
1003    }
1004    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
1005    f32 = float64_to_float32(farg.d, &env->fp_status);
1006    farg.d = float32_to_float64(f32, &env->fp_status);
1007
1008    return farg.ll;
1009}
1010
/* frsqrte  - frsqrte. */
float64 helper_frsqrte(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal square root with a real sqrt + divide. */
    float64 rets = float64_sqrt(arg, &env->fp_status);
    float64 retd = float64_div(float64_one, rets, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status)) {
        if (status & float_flag_invalid) {
            if (float64_is_signaling_nan(arg, &env->fp_status)) {
                /* sNaN reciprocal square root */
                float_invalid_op_vxsnan(env, GETPC());
            } else {
                /* Square root of a negative nonzero number */
                float_invalid_op_vxsqrt(env, 1, GETPC());
            }
        }
        if (status & float_flag_divbyzero) {
            /* Reciprocal of (square root of) zero.  */
            float_zero_divide_excp(env, GETPC());
        }
    }

    return retd;
}
1037
1038/* fsel - fsel. */
1039uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
1040                     uint64_t arg3)
1041{
1042    CPU_DoubleU farg1;
1043
1044    farg1.ll = arg1;
1045
1046    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
1047        !float64_is_any_nan(farg1.d)) {
1048        return arg2;
1049    } else {
1050        return arg3;
1051    }
1052}
1053
/*
 * ftdiv: Floating Test for software Divide.  Computes the fe/fg bits
 * software uses to decide whether fra/frb can take the fast divide
 * path.  Result layout: 0x8 always set, 0x4 = fg, 0x2 = fe.
 */
uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(fra) ||
                 float64_is_infinity(frb) ||
                 float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_a = ppc_float64_get_unbiased_exp(fra);
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(fra) ||
                     float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if ((e_b <= -1022) || (e_b >= 1021)) {
            /* Divisor exponent near the format limits; the quotient
             * could overflow or underflow.  (Bounds assumed to follow
             * the ISA's fe_flag conditions -- TODO confirm against the
             * Power ISA ftdiv description.) */
            fe_flag = 1;
        } else if (!float64_is_zero(fra) &&
                   (((e_a - e_b) >= 1023) ||
                    ((e_a - e_b) <= -1021) ||
                    (e_a <= -970))) {
            /* Quotient exponent outside the safe range, or dividend
             * small enough that the result may be subnormal. */
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* so must be denormalized.                      */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
1089
/*
 * ftsqrt: Floating Test for software Square Root.  Computes the fe/fg
 * bits software uses to decide whether fsqrt(frb) can take the fast
 * path.  Result layout: 0x8 always set, 0x4 = fg, 0x2 = fe.
 */
uint32_t helper_ftsqrt(uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_b = ppc_float64_get_unbiased_exp(frb);

        /*
         * frb is known to be neither zero nor infinity here, so the
         * redundant zero tests of the original code are dropped: the
         * "is zero" branch was unreachable and the "!is zero" conjunct
         * was always true.
         */
        if (unlikely(float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_neg(frb))) {
            fe_flag = 1;
        } else if (e_b <= (-1022 + 52)) {
            /* Operand exponent low enough that the significand falls
             * into the subnormal range -- TODO confirm bound against
             * the Power ISA ftsqrt description. */
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* therefore must be denormalized.               */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
1120
/*
 * fcmpu: Floating Compare Unordered.  Sets CR[crfD] and FPSCR[FPCC]
 * to LT(0x8)/GT(0x4)/EQ(0x2)/UN(0x1); only a signaling-NaN operand
 * raises VXSNAN.
 */
void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;          /* unordered */
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;          /* less than */
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;          /* greater than */
    } else {
        ret = 0x02UL;          /* equal */
    }

    /* Mirror the result into FPSCR[FPCC] and the target CR field. */
    env->fpscr &= ~FP_FPCC;
    env->fpscr |= ret << FPSCR_FPCC;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
        /* sNaN comparison */
        float_invalid_op_vxsnan(env, GETPC());
    }
}
1151
/*
 * fcmpo: Floating Compare Ordered.  Same result encoding as fcmpu,
 * but any NaN operand raises VXVC (and additionally VXSNAN for a
 * signaling NaN).
 */
void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;          /* unordered */
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;          /* less than */
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;          /* greater than */
    } else {
        ret = 0x02UL;          /* equal */
    }

    /* Mirror the result into FPSCR[FPCC] and the target CR field. */
    env->fpscr &= ~FP_FPCC;
    env->fpscr |= ret << FPSCR_FPCC;
    env->crf[crfD] = (uint32_t) ret;
    if (unlikely(ret == 0x01UL)) {
        /* Ordered comparison with a NaN is an invalid operation. */
        float_invalid_op_vxvc(env, 1, GETPC());
        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
            /* sNaN comparison */
            float_invalid_op_vxsnan(env, GETPC());
        }
    }
}
1184
/*
 * Single-precision floating-point conversions (SPE).
 *
 * These use env->vec_status so SPE operations do not perturb the
 * FPSCR softfloat state.  The float->int variants return 0 when the
 * input is a quiet NaN instead of the IEEE default result.
 */

/* efscfsi: signed 32-bit integer -> float32 */
static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

/* efscfui: unsigned 32-bit integer -> float32 */
static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

/* efsctsi: float32 -> signed 32-bit integer, current rounding mode */
static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32(u.f, &env->vec_status);
}

/* efsctui: float32 -> unsigned 32-bit integer, current rounding mode */
static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32(u.f, &env->vec_status);
}

/* efsctsiz: float32 -> signed 32-bit integer, truncating */
static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

/* efsctuiz: float32 -> unsigned 32-bit integer, truncating */
static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}
1255
/*
 * SPE fractional conversions: a "fraction" here is a 32-bit
 * fixed-point value representing val / 2^32, so the conversions scale
 * by a floating-point 2^32 constant.
 */

/* efscfsf: signed 32-bit fraction -> float32 */
static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

/* efscfuf: unsigned 32-bit fraction -> float32 */
static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

/* efsctsf: float32 -> signed 32-bit fraction (quiet NaN yields 0) */
static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

/* efsctuf: float32 -> unsigned 32-bit fraction (quiet NaN yields 0) */
static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}
1311
/*
 * HELPER_SPE_SINGLE_CONV - emit the public helper_e<name> wrapper
 * around the static single-precision conversion e<name> above.
 */
#define HELPER_SPE_SINGLE_CONV(name)                              \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t val)       \
    {                                                             \
        return e##name(env, val);                                 \
    }
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);

/*
 * HELPER_SPE_VECTOR_CONV - emit helper_ev<name>, applying e<name>
 * independently to the high and low 32-bit halves of the operand.
 */
#define HELPER_SPE_VECTOR_CONV(name)                            \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val)    \
    {                                                           \
        return ((uint64_t)e##name(env, val >> 32) << 32) |      \
            (uint64_t)e##name(env, val);                        \
    }
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);
1364
/*
 * Single-precision floating-point arithmetic (SPE), operating on raw
 * 32-bit bit patterns via env->vec_status.
 */

/* efsadd: op1 + op2 */
static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

/* efssub: op1 - op2 */
static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

/* efsmul: op1 * op2 */
static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

/* efsdiv: op1 / op2 */
static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}
1405
/*
 * HELPER_SPE_SINGLE_ARITH - emit helper_e<name> wrapping the static
 * single-precision arithmetic e<name> above.
 */
#define HELPER_SPE_SINGLE_ARITH(name)                                   \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                   \
        return e##name(env, op1, op2);                                  \
    }
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);

/*
 * HELPER_SPE_VECTOR_ARITH - emit helper_ev<name>, applying e<name>
 * independently to the high and low 32-bit halves of both operands.
 */
#define HELPER_SPE_VECTOR_ARITH(name)                                   \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                   \
        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |   \
            (uint64_t)e##name(env, op1, op2);                           \
    }
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);
1434
/*
 * Single-precision floating-point comparisons (SPE).
 * Each returns 4 when the relation holds, 0 otherwise (the value the
 * translator stores into the CR field).
 */
static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    /* "greater than" implemented as "not less-than-or-equal". */
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

/* The "tst" variants are meant to be exception-free tests, but
 * currently just alias the cmp versions (see the XXX notes). */
static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmplt(env, op1, op2);
}

static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmpgt(env, op1, op2);
}

static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmpeq(env, op1, op2);
}
1480
/*
 * HELPER_SINGLE_SPE_CMP - emit helper_e<name> wrapping the static
 * single-precision comparison/test e<name> above.
 */
#define HELPER_SINGLE_SPE_CMP(name)                                     \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                   \
        return e##name(env, op1, op2);                                  \
    }
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);
1498
/*
 * Merge two per-element comparison results into one CR-field value:
 * high element in bit 3, low element in bit 2, OR in bit 1, AND in
 * bit 0 (for 0/1 inputs; callers may pass other encodings).
 */
static inline uint32_t evcmp_merge(int t0, int t1)
{
    uint32_t merged = t0 & t1;

    merged |= (t0 | t1) << 1;
    merged |= t1 << 2;
    merged |= t0 << 3;

    return merged;
}
1503
/*
 * HELPER_VECTOR_SPE_CMP - emit helper_ev<name>, comparing both 32-bit
 * halves and merging the two results with evcmp_merge().
 *
 * NOTE(review): e<name> returns 0 or 4 (not 0 or 1), so evcmp_merge
 * sets bits above the low nibble of the result -- verify how the
 * translator consumes this value.
 */
#define HELPER_VECTOR_SPE_CMP(name)                                     \
    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                   \
        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),          \
                           e##name(env, op1, op2));                     \
    }
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
1522
/*
 * Double-precision floating-point conversions (SPE), using
 * env->vec_status like the single-precision helpers.
 */

/* efdcfsi: signed 32-bit integer -> float64 */
uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

/* efdcfsid: signed 64-bit integer -> float64 */
uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

/* efdcfui: unsigned 32-bit integer -> float64 */
uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

/* efdcfuid: unsigned 64-bit integer -> float64 */
uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}
1559
/* efdctsi: float64 -> signed 32-bit integer (any NaN yields 0) */
uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32(u.d, &env->vec_status);
}

/* efdctui: float64 -> unsigned 32-bit integer (any NaN yields 0) */
uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32(u.d, &env->vec_status);
}

/* efdctsiz: float64 -> signed 32-bit integer, truncating */
uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

/* efdctsidz: float64 -> signed 64-bit integer, truncating */
uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

/* efdctuiz: float64 -> unsigned 32-bit integer, truncating */
uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

/* efdctuidz: float64 -> unsigned 64-bit integer, truncating */
uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}
1637
/* efdcfsf: signed 32-bit fraction (val / 2^32) -> float64 */
uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}
1649
1650uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
1651{
1652    CPU_DoubleU u;
1653    float64 tmp;
1654
1655    u.d = uint32_to_float64(val, &env->vec_status);
1656    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
1657    u.d = float64_div(u.d, tmp, &env->vec_status);
1658
1659    return u.ll;
1660}
1661
/* efdctsf: float64 -> signed 32-bit fraction, i.e. round(d * 2^32)
 * (any NaN yields 0) */
uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}

/* efdctuf: float64 -> unsigned 32-bit fraction (any NaN yields 0) */
uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}
1693
/* efscfd: narrow float64 -> float32 */
uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

/* efdcfs: widen float32 -> float64 */
uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}
1715
/* Double-precision floating-point arithmetic (SPE).  (The previous
 * header said "fixed-point"; these operate on float64 values.) */

/* efdadd: op1 + op2 */
uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

/* efdsub: op1 - op2 */
uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

/* efdmul: op1 * op2 */
uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

/* efddiv: op1 / op2 */
uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}
1756
/* Double-precision floating-point comparisons (SPE); each returns 4
 * when the relation holds, 0 otherwise. */
uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    /* "greater than" implemented as "not less-than-or-equal". */
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}

uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    /* NOTE(review): only the equality test uses the quiet comparison;
     * tstlt/tstgt above use float64_lt/float64_le -- confirm whether
     * the "tst" variants are meant to be exception-free. */
    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

/* The "cmp" variants currently alias the "tst" ones. */
uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstlt(env, op1, op2);
}

uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstgt(env, op1, op2);
}

uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtsteq(env, op1, op2);
}
1802
/* Identity "conversion" so VSX macros can use tp##_to_float64
 * uniformly regardless of the element type. */
#define float64_to_float64(x, env) x


/*
 * VSX_ADD_SUB - VSX floating point add/subtract
 *   name  - instruction mnemonic
 *   op    - operation (add or sub)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round the double-precision result to single precision
 */
#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp)                    \
void helper_##name(CPUPPCState *env, ppc_vsr_t *xt,                          \
                   ppc_vsr_t *xa, ppc_vsr_t *xb)                             \
{                                                                            \
    ppc_vsr_t t = *xt;                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_##op(xa->fld, xb->fld, &tstat);                         \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_addsub(env, sfprf, GETPC(),                     \
                                    tp##_classify(xa->fld) |                 \
                                    tp##_classify(xb->fld));                 \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = helper_frsp(env, t.fld);                                 \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
    *xt = t;                                                                 \
    do_float_check_status(env, GETPC());                                     \
}

VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
1856
1857void helper_xsaddqp(CPUPPCState *env, uint32_t opcode,
1858                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
1859{
1860    ppc_vsr_t t = *xt;
1861    float_status tstat;
1862
1863    helper_reset_fpstatus(env);
1864
1865    tstat = env->fp_status;
1866    if (unlikely(Rc(opcode) != 0)) {
1867        tstat.float_rounding_mode = float_round_to_odd;
1868    }
1869
1870    set_float_exception_flags(0, &tstat);
1871    t.f128 = float128_add(xa->f128, xb->f128, &tstat);
1872    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
1873
1874    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
1875        float_invalid_op_addsub(env, 1, GETPC(),
1876                                float128_classify(xa->f128) |
1877                                float128_classify(xb->f128));
1878    }
1879
1880    helper_compute_fprf_float128(env, t.f128);
1881
1882    *xt = t;
1883    do_float_check_status(env, GETPC());
1884}
1885
1886/*
1887 * VSX_MUL - VSX floating point multiply
1888 *   op    - instruction mnemonic
1889 *   nels  - number of elements (1, 2 or 4)
1890 *   tp    - type (float32 or float64)
1891 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1892 *   sfprf - set FPRF
1893 */
#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp)                              \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                            \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                               \
{                                                                            \
    ppc_vsr_t t = *xt;                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        /* Compute with a scratch status so this element's flags are     */  \
        /* visible in isolation, then merge them into env->fp_status.    */  \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_mul(xa->fld, xb->fld, &tstat);                          \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_mul(env, sfprf, GETPC(),                        \
                                 tp##_classify(xa->fld) |                    \
                                 tp##_classify(xb->fld));                    \
        }                                                                    \
                                                                             \
        /* Single-precision forms round the double result to float32.    */  \
        if (r2sp) {                                                          \
            t.fld = helper_frsp(env, t.fld);                                 \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, GETPC());                                     \
}
1927
/* Scalar and vector multiply; xsmulsp rounds its result to single. */
VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
1932
1933void helper_xsmulqp(CPUPPCState *env, uint32_t opcode,
1934                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
1935{
1936    ppc_vsr_t t = *xt;
1937    float_status tstat;
1938
1939    helper_reset_fpstatus(env);
1940    tstat = env->fp_status;
1941    if (unlikely(Rc(opcode) != 0)) {
1942        tstat.float_rounding_mode = float_round_to_odd;
1943    }
1944
1945    set_float_exception_flags(0, &tstat);
1946    t.f128 = float128_mul(xa->f128, xb->f128, &tstat);
1947    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
1948
1949    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
1950        float_invalid_op_mul(env, 1, GETPC(),
1951                             float128_classify(xa->f128) |
1952                             float128_classify(xb->f128));
1953    }
1954    helper_compute_fprf_float128(env, t.f128);
1955
1956    *xt = t;
1957    do_float_check_status(env, GETPC());
1958}
1959
1960/*
1961 * VSX_DIV - VSX floating point divide
1962 *   op    - instruction mnemonic
1963 *   nels  - number of elements (1, 2 or 4)
1964 *   tp    - type (float32 or float64)
1965 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1966 *   sfprf - set FPRF
1967 */
#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp)                               \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                             \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                                \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    int i;                                                                    \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        /* Compute with a scratch status so this element's flags are      */  \
        /* visible in isolation, then merge them into env->fp_status.     */  \
        float_status tstat = env->fp_status;                                  \
        set_float_exception_flags(0, &tstat);                                 \
        t.fld = tp##_div(xa->fld, xb->fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
                                                                              \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
            float_invalid_op_div(env, sfprf, GETPC(),                         \
                                 tp##_classify(xa->fld) |                     \
                                 tp##_classify(xb->fld));                     \
        }                                                                     \
        /* Divide also has to report zero-divide exceptions.             */   \
        if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {   \
            float_zero_divide_excp(env, GETPC());                             \
        }                                                                     \
                                                                              \
        if (r2sp) {                                                           \
            t.fld = helper_frsp(env, t.fld);                                  \
        }                                                                     \
                                                                              \
        if (sfprf) {                                                          \
            helper_compute_fprf_float64(env, t.fld);                          \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, GETPC());                                      \
}
2004
/* Scalar and vector divide; xsdivsp rounds its result to single. */
VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
2009
2010void helper_xsdivqp(CPUPPCState *env, uint32_t opcode,
2011                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
2012{
2013    ppc_vsr_t t = *xt;
2014    float_status tstat;
2015
2016    helper_reset_fpstatus(env);
2017    tstat = env->fp_status;
2018    if (unlikely(Rc(opcode) != 0)) {
2019        tstat.float_rounding_mode = float_round_to_odd;
2020    }
2021
2022    set_float_exception_flags(0, &tstat);
2023    t.f128 = float128_div(xa->f128, xb->f128, &tstat);
2024    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
2025
2026    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
2027        float_invalid_op_div(env, 1, GETPC(),
2028                             float128_classify(xa->f128) |
2029                             float128_classify(xb->f128));
2030    }
2031    if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {
2032        float_zero_divide_excp(env, GETPC());
2033    }
2034
2035    helper_compute_fprf_float128(env, t.f128);
2036    *xt = t;
2037    do_float_check_status(env, GETPC());
2038}
2039
2040/*
2041 * VSX_RE  - VSX floating point reciprocal estimate
2042 *   op    - instruction mnemonic
2043 *   nels  - number of elements (1, 2 or 4)
2044 *   tp    - type (float32 or float64)
2045 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2046 *   sfprf - set FPRF
2047 */
#define VSX_RE(op, nels, tp, fld, sfprf, r2sp)                                \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)              \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    int i;                                                                    \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        /* A signaling NaN input raises VXSNAN before the estimate.       */  \
        if (unlikely(tp##_is_signaling_nan(xb->fld, &env->fp_status))) {      \
            float_invalid_op_vxsnan(env, GETPC());                            \
        }                                                                     \
        /* The "estimate" is implemented as an exact 1/x divide; unlike   */  \
        /* the arithmetic helpers this updates env->fp_status directly.   */  \
        t.fld = tp##_div(tp##_one, xb->fld, &env->fp_status);                 \
                                                                              \
        if (r2sp) {                                                           \
            t.fld = helper_frsp(env, t.fld);                                  \
        }                                                                     \
                                                                              \
        if (sfprf) {                                                          \
            helper_compute_fprf_float64(env, t.fld);                          \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, GETPC());                                      \
}
2074
/* Scalar and vector reciprocal estimate instantiations. */
VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
2079
2080/*
2081 * VSX_SQRT - VSX floating point square root
2082 *   op    - instruction mnemonic
2083 *   nels  - number of elements (1, 2 or 4)
2084 *   tp    - type (float32 or float64)
2085 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2086 *   sfprf - set FPRF
2087 */
#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp)                             \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
{                                                                            \
    ppc_vsr_t t = *xt;                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        /* Compute with a scratch status so this element's flags are     */  \
        /* visible in isolation, then merge them into env->fp_status.    */  \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_sqrt(xb->fld, &tstat);                                  \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            /* Negative non-zero input -> VXSQRT; SNaN input -> VXSNAN.  */  \
            if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) {            \
                float_invalid_op_vxsqrt(env, sfprf, GETPC());                \
            } else if (tp##_is_signaling_nan(xb->fld, &tstat)) {             \
                float_invalid_op_vxsnan(env, GETPC());                       \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = helper_frsp(env, t.fld);                                 \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, GETPC());                                     \
}
2122
/* Scalar and vector square-root instantiations. */
VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
2127
2128/*
2129 *VSX_RSQRTE - VSX floating point reciprocal square root estimate
2130 *   op    - instruction mnemonic
2131 *   nels  - number of elements (1, 2 or 4)
2132 *   tp    - type (float32 or float64)
2133 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2134 *   sfprf - set FPRF
2135 */
#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp)                           \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
{                                                                            \
    ppc_vsr_t t = *xt;                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        /* The "estimate" is computed exactly: a sqrt followed by a      */  \
        /* reciprocal, both under the same scratch status.               */  \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_sqrt(xb->fld, &tstat);                                  \
        t.fld = tp##_div(tp##_one, t.fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            /* Negative non-zero input -> VXSQRT; SNaN input -> VXSNAN.  */  \
            if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) {            \
                float_invalid_op_vxsqrt(env, sfprf, GETPC());                \
            } else if (tp##_is_signaling_nan(xb->fld, &tstat)) {             \
                float_invalid_op_vxsnan(env, GETPC());                       \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = helper_frsp(env, t.fld);                                 \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, GETPC());                                     \
}
2171
/* Scalar and vector reciprocal square-root estimate instantiations. */
VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
2176
2177/*
2178 * VSX_TDIV - VSX floating point test for divide
2179 *   op    - instruction mnemonic
2180 *   nels  - number of elements (1, 2 or 4)
2181 *   tp    - type (float32 or float64)
2182 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2183 *   emin  - minimum unbiased exponent
2184 *   emax  - maximum unbiased exponent
2185 *   nbits - number of fraction bits
2186 */
#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                  \
void helper_##op(CPUPPCState *env, uint32_t opcode,                     \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                          \
{                                                                       \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        /* Infinite operands or a zero divisor set both flags.       */ \
        if (unlikely(tp##_is_infinity(xa->fld) ||                       \
                     tp##_is_infinity(xb->fld) ||                       \
                     tp##_is_zero(xb->fld))) {                          \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_a = ppc_##tp##_get_unbiased_exp(xa->fld);             \
            int e_b = ppc_##tp##_get_unbiased_exp(xb->fld);             \
                                                                        \
            /* fe is set for NaN operands, a divisor exponent near   */ \
            /* the format limits, or a quotient exponent outside the */ \
            /* representable/accurate window.                        */ \
            if (unlikely(tp##_is_any_nan(xa->fld) ||                    \
                         tp##_is_any_nan(xb->fld))) {                   \
                fe_flag = 1;                                            \
            } else if ((e_b <= emin) || (e_b >= (emax - 2))) {          \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xa->fld) &&                        \
                       (((e_a - e_b) >= emax) ||                        \
                        ((e_a - e_b) <= (emin + 1)) ||                  \
                        (e_a <= (emin + nbits)))) {                     \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb->fld))) {          \
                /*                                                      \
                 * XB is not zero because of the above check and so     \
                 * must be denormalized.                                \
                 */                                                     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    /* CR field: 0b1000 | fg_flag << 2 | fe_flag << 1.               */ \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}
2229
/* Test-for-divide instantiations (double: emin -1022, single: emin -126). */
VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
2233
2234/*
2235 * VSX_TSQRT - VSX floating point test for square root
2236 *   op    - instruction mnemonic
2237 *   nels  - number of elements (1, 2 or 4)
2238 *   tp    - type (float32 or float64)
2239 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2240 *   emin  - minimum unbiased exponent
2241 *   emax  - maximum unbiased exponent
2242 *   nbits - number of fraction bits
2243 */
/*
 * The else branch below is only reached when XB is neither infinity nor
 * zero (the outer if handles both), so the original is-zero re-checks
 * were unreachable/redundant and have been dropped; behavior unchanged.
 */
#define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                       \
void helper_##op(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb)      \
{                                                                       \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        /* Infinite or zero input sets both flags.                   */ \
        if (unlikely(tp##_is_infinity(xb->fld) ||                       \
                     tp##_is_zero(xb->fld))) {                          \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_b = ppc_##tp##_get_unbiased_exp(xb->fld);             \
                                                                        \
            /* XB is neither infinity nor zero here: flag NaNs,      */ \
            /* negative inputs and too-small exponents.              */ \
            if (unlikely(tp##_is_any_nan(xb->fld))) {                   \
                fe_flag = 1;                                            \
            } else if (unlikely(tp##_is_neg(xb->fld))) {                \
                fe_flag = 1;                                            \
            } else if (e_b <= (emin + nbits)) {                         \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb->fld))) {          \
                /*                                                      \
                 * XB is not zero because of the above check and        \
                 * therefore must be denormalized.                      \
                 */                                                     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    /* CR field: 0b1000 | fg_flag << 2 | fe_flag << 1.               */ \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}
2282
/* Test-for-square-root instantiations (double: emin -1022, single: -126). */
VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
2286
2287/*
2288 * VSX_MADD - VSX floating point muliply/add variations
2289 *   op    - instruction mnemonic
2290 *   nels  - number of elements (1, 2 or 4)
2291 *   tp    - type (float32 or float64)
2292 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2293 *   maddflgs - flags for the float*muladd routine that control the
2294 *           various forms (madd, msub, nmadd, nmsub)
2295 *   sfprf - set FPRF
2296 */
#define VSX_MADD(op, nels, tp, fld, maddflgs, sfprf, r2sp)                    \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                             \
                 ppc_vsr_t *xa, ppc_vsr_t *b, ppc_vsr_t *c)                   \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    int i;                                                                    \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        float_status tstat = env->fp_status;                                  \
        set_float_exception_flags(0, &tstat);                                 \
        if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
            /*                                                                \
             * Avoid double rounding errors by rounding the intermediate      \
             * result to odd.                                                 \
             */                                                               \
            set_float_rounding_mode(float_round_to_zero, &tstat);             \
            t.fld = tp##_muladd(xa->fld, b->fld, c->fld,                      \
                                maddflgs, &tstat);                            \
            /* ORing the inexact flag into the LSB of the truncated       */  \
            /* result completes the round-to-odd emulation, so the later  */  \
            /* frsp step rounds correctly.                                */  \
            t.fld |= (get_float_exception_flags(&tstat) &                     \
                      float_flag_inexact) != 0;                               \
        } else {                                                              \
            t.fld = tp##_muladd(xa->fld, b->fld, c->fld,                      \
                                maddflgs, &tstat);                            \
        }                                                                     \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
                                                                              \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
            tp##_maddsub_update_excp(env, xa->fld, b->fld,                    \
                                     c->fld, maddflgs, GETPC());              \
        }                                                                     \
                                                                              \
        if (r2sp) {                                                           \
            t.fld = helper_frsp(env, t.fld);                                  \
        }                                                                     \
                                                                              \
        if (sfprf) {                                                          \
            helper_compute_fprf_float64(env, t.fld);                          \
        }                                                                     \
    }                                                                         \
    *xt = t;                                                                  \
    do_float_check_status(env, GETPC());                                      \
}
2341
/* Scalar double-precision fused multiply-add family. */
VSX_MADD(xsmadddp, 1, float64, VsrD(0), MADD_FLGS, 1, 0)
VSX_MADD(xsmsubdp, 1, float64, VsrD(0), MSUB_FLGS, 1, 0)
VSX_MADD(xsnmadddp, 1, float64, VsrD(0), NMADD_FLGS, 1, 0)
VSX_MADD(xsnmsubdp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 0)
/* Scalar single-precision forms (result rounded to single). */
VSX_MADD(xsmaddsp, 1, float64, VsrD(0), MADD_FLGS, 1, 1)
VSX_MADD(xsmsubsp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1)
VSX_MADD(xsnmaddsp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1)
VSX_MADD(xsnmsubsp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1)

/* Vector double-precision forms. */
VSX_MADD(xvmadddp, 2, float64, VsrD(i), MADD_FLGS, 0, 0)
VSX_MADD(xvmsubdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0)
VSX_MADD(xvnmadddp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0)
VSX_MADD(xvnmsubdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0)

/* Vector single-precision forms. */
VSX_MADD(xvmaddsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0)
VSX_MADD(xvmsubsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0)
VSX_MADD(xvnmaddsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0)
VSX_MADD(xvnmsubsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0)
2360
/*
 * VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
 *   op    - instruction mnemonic
 *   cmp   - comparison operation
 *   exp   - expected result of comparison
 *   svxvc - set VXVC bit
 *
 * Writes all-ones to xt->VsrD(0) when the comparison result equals exp,
 * all-zeroes otherwise; VsrD(1) is always cleared.
 */
#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc)                                \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                             \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                                \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false;            \
                                                                              \
    /* An SNaN input sets VXSNAN; with VE clear it also sets VXVC,  */        \
    /* but only for the svxvc (ordered-style) compares.             */        \
    if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||             \
        float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {             \
        vxsnan_flag = true;                                                   \
        if (fpscr_ve == 0 && svxvc) {                                         \
            vxvc_flag = true;                                                 \
        }                                                                     \
    } else if (svxvc) {                                                       \
        /* Quiet NaNs raise only VXVC, and only for svxvc compares. */        \
        vxvc_flag = float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) ||     \
            float64_is_quiet_nan(xb->VsrD(0), &env->fp_status);               \
    }                                                                         \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_vxsnan(env, GETPC());                                \
    }                                                                         \
    if (vxvc_flag) {                                                          \
        float_invalid_op_vxvc(env, 0, GETPC());                               \
    }                                                                         \
    /* An enabled invalid-operation exception leaves xt unmodified. */        \
    vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag);                        \
                                                                              \
    if (!vex_flag) {                                                          \
        if (float64_##cmp(xb->VsrD(0), xa->VsrD(0),                           \
                          &env->fp_status) == exp) {                          \
            t.VsrD(0) = -1;                                                   \
            t.VsrD(1) = 0;                                                    \
        } else {                                                              \
            t.VsrD(0) = 0;                                                    \
            t.VsrD(1) = 0;                                                    \
        }                                                                     \
    }                                                                         \
    *xt = t;                                                                  \
    do_float_check_status(env, GETPC());                                      \
}

VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)
2411
2412void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode,
2413                       ppc_vsr_t *xa, ppc_vsr_t *xb)
2414{
2415    int64_t exp_a, exp_b;
2416    uint32_t cc;
2417
2418    exp_a = extract64(xa->VsrD(0), 52, 11);
2419    exp_b = extract64(xb->VsrD(0), 52, 11);
2420
2421    if (unlikely(float64_is_any_nan(xa->VsrD(0)) ||
2422                 float64_is_any_nan(xb->VsrD(0)))) {
2423        cc = CRF_SO;
2424    } else {
2425        if (exp_a < exp_b) {
2426            cc = CRF_LT;
2427        } else if (exp_a > exp_b) {
2428            cc = CRF_GT;
2429        } else {
2430            cc = CRF_EQ;
2431        }
2432    }
2433
2434    env->fpscr &= ~FP_FPCC;
2435    env->fpscr |= cc << FPSCR_FPCC;
2436    env->crf[BF(opcode)] = cc;
2437
2438    do_float_check_status(env, GETPC());
2439}
2440
2441void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode,
2442                       ppc_vsr_t *xa, ppc_vsr_t *xb)
2443{
2444    int64_t exp_a, exp_b;
2445    uint32_t cc;
2446
2447    exp_a = extract64(xa->VsrD(0), 48, 15);
2448    exp_b = extract64(xb->VsrD(0), 48, 15);
2449
2450    if (unlikely(float128_is_any_nan(xa->f128) ||
2451                 float128_is_any_nan(xb->f128))) {
2452        cc = CRF_SO;
2453    } else {
2454        if (exp_a < exp_b) {
2455            cc = CRF_LT;
2456        } else if (exp_a > exp_b) {
2457            cc = CRF_GT;
2458        } else {
2459            cc = CRF_EQ;
2460        }
2461    }
2462
2463    env->fpscr &= ~FP_FPCC;
2464    env->fpscr |= cc << FPSCR_FPCC;
2465    env->crf[BF(opcode)] = cc;
2466
2467    do_float_check_status(env, GETPC());
2468}
2469
/*
 * Scalar double-precision compare, setting FPCC and CR[crf_idx].
 * "ordered" selects xscmpodp-style behaviour: NaN operands additionally
 * raise VXVC (and for SNaNs with VE clear, both VXSNAN and VXVC).
 */
static inline void do_scalar_cmp(CPUPPCState *env, ppc_vsr_t *xa, ppc_vsr_t *xb,
                                 int crf_idx, bool ordered)
{
    uint32_t cc;
    bool vxsnan_flag = false, vxvc_flag = false;

    helper_reset_fpstatus(env);

    switch (float64_compare(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) {
    case float_relation_less:
        cc = CRF_LT;
        break;
    case float_relation_equal:
        cc = CRF_EQ;
        break;
    case float_relation_greater:
        cc = CRF_GT;
        break;
    case float_relation_unordered:
        cc = CRF_SO;

        /* SNaNs always set VXSNAN; VXVC in addition when the compare is
         * ordered and VE is clear.  Quiet NaNs set only VXVC, ordered only. */
        if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||
            float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {
            vxsnan_flag = true;
            if (fpscr_ve == 0 && ordered) {
                vxvc_flag = true;
            }
        } else if (float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) ||
                   float64_is_quiet_nan(xb->VsrD(0), &env->fp_status)) {
            if (ordered) {
                vxvc_flag = true;
            }
        }

        break;
    default:
        g_assert_not_reached();
    }

    /* FPCC and the CR field are updated before any invalid-op exception
     * is raised below. */
    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[crf_idx] = cc;

    if (vxsnan_flag) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    if (vxvc_flag) {
        float_invalid_op_vxvc(env, 0, GETPC());
    }

    do_float_check_status(env, GETPC());
}
2522
/* xscmpodp - ordered scalar DP compare (NaNs raise VXVC). */
void helper_xscmpodp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
                     ppc_vsr_t *xb)
{
    do_scalar_cmp(env, xa, xb, BF(opcode), true);
}
2528
/* xscmpudp - unordered scalar DP compare (quiet NaNs do not raise VXVC). */
void helper_xscmpudp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
                     ppc_vsr_t *xb)
{
    do_scalar_cmp(env, xa, xb, BF(opcode), false);
}
2534
/*
 * Scalar quad-precision (float128) compare, setting FPCC and CR[crf_idx].
 * Same flag semantics as do_scalar_cmp above, for the QP operand type.
 */
static inline void do_scalar_cmpq(CPUPPCState *env, ppc_vsr_t *xa,
                                  ppc_vsr_t *xb, int crf_idx, bool ordered)
{
    uint32_t cc;
    bool vxsnan_flag = false, vxvc_flag = false;

    helper_reset_fpstatus(env);

    switch (float128_compare(xa->f128, xb->f128, &env->fp_status)) {
    case float_relation_less:
        cc = CRF_LT;
        break;
    case float_relation_equal:
        cc = CRF_EQ;
        break;
    case float_relation_greater:
        cc = CRF_GT;
        break;
    case float_relation_unordered:
        cc = CRF_SO;

        /* SNaNs always set VXSNAN; VXVC in addition when the compare is
         * ordered and VE is clear.  Quiet NaNs set only VXVC, ordered only. */
        if (float128_is_signaling_nan(xa->f128, &env->fp_status) ||
            float128_is_signaling_nan(xb->f128, &env->fp_status)) {
            vxsnan_flag = true;
            if (fpscr_ve == 0 && ordered) {
                vxvc_flag = true;
            }
        } else if (float128_is_quiet_nan(xa->f128, &env->fp_status) ||
                   float128_is_quiet_nan(xb->f128, &env->fp_status)) {
            if (ordered) {
                vxvc_flag = true;
            }
        }

        break;
    default:
        g_assert_not_reached();
    }

    /* FPCC and the CR field are updated before any invalid-op exception
     * is raised below. */
    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[crf_idx] = cc;

    if (vxsnan_flag) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    if (vxvc_flag) {
        float_invalid_op_vxvc(env, 0, GETPC());
    }

    do_float_check_status(env, GETPC());
}
2587
/* xscmpoqp - ordered scalar QP compare (NaNs raise VXVC). */
void helper_xscmpoqp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
                     ppc_vsr_t *xb)
{
    do_scalar_cmpq(env, xa, xb, BF(opcode), true);
}
2593
/* xscmpuqp - unordered scalar QP compare (quiet NaNs do not raise VXVC). */
void helper_xscmpuqp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
                     ppc_vsr_t *xb)
{
    do_scalar_cmpq(env, xa, xb, BF(opcode), false);
}
2599
/*
 * VSX_MAX_MIN - VSX floating point maximum/minimum
 *   name  - instruction mnemonic
 *   op    - operation (max or min)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *
 * Delegates NaN handling to softfloat's maxnum/minnum; SNaN inputs
 * additionally raise VXSNAN here.
 */
#define VSX_MAX_MIN(name, op, nels, tp, fld)                                  \
void helper_##name(CPUPPCState *env, ppc_vsr_t *xt,                           \
                   ppc_vsr_t *xa, ppc_vsr_t *xb)                              \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    int i;                                                                    \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        t.fld = tp##_##op(xa->fld, xb->fld, &env->fp_status);                 \
        if (unlikely(tp##_is_signaling_nan(xa->fld, &env->fp_status) ||       \
                     tp##_is_signaling_nan(xb->fld, &env->fp_status))) {      \
            float_invalid_op_vxsnan(env, GETPC());                            \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, GETPC());                                      \
}

VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
2633
2634#define VSX_MAX_MINC(name, max)                                               \
2635void helper_##name(CPUPPCState *env, uint32_t opcode,                         \
2636                   ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)               \
2637{                                                                             \
2638    ppc_vsr_t t = *xt;                                                        \
2639    bool vxsnan_flag = false, vex_flag = false;                               \
2640                                                                              \
2641    if (unlikely(float64_is_any_nan(xa->VsrD(0)) ||                           \
2642                 float64_is_any_nan(xb->VsrD(0)))) {                          \
2643        if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||         \
2644            float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {         \
2645            vxsnan_flag = true;                                               \
2646        }                                                                     \
2647        t.VsrD(0) = xb->VsrD(0);                                              \
2648    } else if ((max &&                                                        \
2649               !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) ||     \
2650               (!max &&                                                       \
2651               float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) {      \
2652        t.VsrD(0) = xa->VsrD(0);                                              \
2653    } else {                                                                  \
2654        t.VsrD(0) = xb->VsrD(0);                                              \
2655    }                                                                         \
2656                                                                              \
2657    vex_flag = fpscr_ve & vxsnan_flag;                                        \
2658    if (vxsnan_flag) {                                                        \
2659        float_invalid_op_vxsnan(env, GETPC());                                \
2660    }                                                                         \
2661    if (!vex_flag) {                                                          \
2662        *xt = t;                                                              \
2663    }                                                                         \
2664}                                                                             \
2665
2666VSX_MAX_MINC(xsmaxcdp, 1);
2667VSX_MAX_MINC(xsmincdp, 0);
2668
2669#define VSX_MAX_MINJ(name, max)                                               \
2670void helper_##name(CPUPPCState *env, uint32_t opcode,                         \
2671                   ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)               \
2672{                                                                             \
2673    ppc_vsr_t t = *xt;                                                        \
2674    bool vxsnan_flag = false, vex_flag = false;                               \
2675                                                                              \
2676    if (unlikely(float64_is_any_nan(xa->VsrD(0)))) {                          \
2677        if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status)) {         \
2678            vxsnan_flag = true;                                               \
2679        }                                                                     \
2680        t.VsrD(0) = xa->VsrD(0);                                              \
2681    } else if (unlikely(float64_is_any_nan(xb->VsrD(0)))) {                   \
2682        if (float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {         \
2683            vxsnan_flag = true;                                               \
2684        }                                                                     \
2685        t.VsrD(0) = xb->VsrD(0);                                              \
2686    } else if (float64_is_zero(xa->VsrD(0)) &&                                \
2687               float64_is_zero(xb->VsrD(0))) {                                \
2688        if (max) {                                                            \
2689            if (!float64_is_neg(xa->VsrD(0)) ||                               \
2690                !float64_is_neg(xb->VsrD(0))) {                               \
2691                t.VsrD(0) = 0ULL;                                             \
2692            } else {                                                          \
2693                t.VsrD(0) = 0x8000000000000000ULL;                            \
2694            }                                                                 \
2695        } else {                                                              \
2696            if (float64_is_neg(xa->VsrD(0)) ||                                \
2697                float64_is_neg(xb->VsrD(0))) {                                \
2698                t.VsrD(0) = 0x8000000000000000ULL;                            \
2699            } else {                                                          \
2700                t.VsrD(0) = 0ULL;                                             \
2701            }                                                                 \
2702        }                                                                     \
2703    } else if ((max &&                                                        \
2704               !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) ||     \
2705               (!max &&                                                       \
2706               float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) {      \
2707        t.VsrD(0) = xa->VsrD(0);                                              \
2708    } else {                                                                  \
2709        t.VsrD(0) = xb->VsrD(0);                                              \
2710    }                                                                         \
2711                                                                              \
2712    vex_flag = fpscr_ve & vxsnan_flag;                                        \
2713    if (vxsnan_flag) {                                                        \
2714        float_invalid_op_vxsnan(env, GETPC());                                \
2715    }                                                                         \
2716    if (!vex_flag) {                                                          \
2717        *xt = t;                                                              \
2718    }                                                                         \
2719}                                                                             \
2720
2721VSX_MAX_MINJ(xsmaxjdp, 1);
2722VSX_MAX_MINJ(xsminjdp, 0);
2723
/*
 * VSX_CMP - VSX floating point compare
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   cmp   - comparison operation
 *   svxvc - set VXVC bit
 *   exp   - expected result of comparison
 *
 * Each lane is written all-ones when the comparison matches exp, zero
 * otherwise.  Returns a CR6-style value: 0x8 if all lanes matched,
 * 0x2 if none did.
 */
#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp)                       \
uint32_t helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                     \
                     ppc_vsr_t *xa, ppc_vsr_t *xb)                        \
{                                                                         \
    ppc_vsr_t t = *xt;                                                    \
    uint32_t crf6 = 0;                                                    \
    int i;                                                                \
    int all_true = 1;                                                     \
    int all_false = 1;                                                    \
                                                                          \
    for (i = 0; i < nels; i++) {                                          \
        /* NaN lanes compare false; SNaNs raise VXSNAN, and VXVC too  */  \
        /* for the svxvc (ordering) compares.                         */  \
        if (unlikely(tp##_is_any_nan(xa->fld) ||                          \
                     tp##_is_any_nan(xb->fld))) {                         \
            if (tp##_is_signaling_nan(xa->fld, &env->fp_status) ||        \
                tp##_is_signaling_nan(xb->fld, &env->fp_status)) {        \
                float_invalid_op_vxsnan(env, GETPC());                    \
            }                                                             \
            if (svxvc) {                                                  \
                float_invalid_op_vxvc(env, 0, GETPC());                   \
            }                                                             \
            t.fld = 0;                                                    \
            all_true = 0;                                                 \
        } else {                                                          \
            if (tp##_##cmp(xb->fld, xa->fld, &env->fp_status) == exp) {   \
                t.fld = -1;                                               \
                all_false = 0;                                            \
            } else {                                                      \
                t.fld = 0;                                                \
                all_true = 0;                                             \
            }                                                             \
        }                                                                 \
    }                                                                     \
                                                                          \
    *xt = t;                                                              \
    crf6 = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);                  \
    return crf6;                                                          \
}

VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
2780
/*
 * VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 *
 * SNaN inputs raise VXSNAN and the converted result is re-marked quiet.
 */
#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf)    \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)   \
{                                                                  \
    ppc_vsr_t t = *xt;                                             \
    int i;                                                         \
                                                                   \
    for (i = 0; i < nels; i++) {                                   \
        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);        \
        if (unlikely(stp##_is_signaling_nan(xb->sfld,              \
                                            &env->fp_status))) {   \
            float_invalid_op_vxsnan(env, GETPC());                 \
            t.tfld = ttp##_snan_to_qnan(t.tfld);                   \
        }                                                          \
        if (sfprf) {                                               \
            helper_compute_fprf_##ttp(env, t.tfld);                \
        }                                                          \
    }                                                              \
                                                                   \
    *xt = t;                                                       \
    do_float_check_status(env, GETPC());                           \
}

VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2 * i), 0)
VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)
2817
/*
 * VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 *
 * Same body as VSX_CVT_FP_TO_FP, but the helper signature additionally
 * takes the raw opcode (currently unused in the body).
 */
#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf)    \
void helper_##op(CPUPPCState *env, uint32_t opcode,                       \
                 ppc_vsr_t *xt, ppc_vsr_t *xb)                            \
{                                                                       \
    ppc_vsr_t t = *xt;                                                  \
    int i;                                                              \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);             \
        if (unlikely(stp##_is_signaling_nan(xb->sfld,                   \
                                            &env->fp_status))) {        \
            float_invalid_op_vxsnan(env, GETPC());                      \
            t.tfld = ttp##_snan_to_qnan(t.tfld);                        \
        }                                                               \
        if (sfprf) {                                                    \
            helper_compute_fprf_##ttp(env, t.tfld);                     \
        }                                                               \
    }                                                                   \
                                                                        \
    *xt = t;                                                            \
    do_float_check_status(env, GETPC());                                \
}

VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
2852
/*
 * VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
 *                       involving one half precision value
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type
 *   ttp   - target type
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfprf - set FPRF
 *
 * Unlike the other converters, t starts zeroed so the lanes not written
 * by tfld end up cleared in the result.
 */
#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)   \
{                                                                  \
    ppc_vsr_t t = { };                                             \
    int i;                                                         \
                                                                   \
    for (i = 0; i < nels; i++) {                                   \
        /* The '1' selects IEEE half-precision format in softfloat's */ \
        /* float16 conversions (the ieee flag).                      */ \
        t.tfld = stp##_to_##ttp(xb->sfld, 1, &env->fp_status);     \
        if (unlikely(stp##_is_signaling_nan(xb->sfld,              \
                                            &env->fp_status))) {   \
            float_invalid_op_vxsnan(env, GETPC());                 \
            t.tfld = ttp##_snan_to_qnan(t.tfld);                   \
        }                                                          \
        if (sfprf) {                                               \
            helper_compute_fprf_##ttp(env, t.tfld);                \
        }                                                          \
    }                                                              \
                                                                   \
    *xt = t;                                                       \
    do_float_check_status(env, GETPC());                           \
}

VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i  + 1), 0)
VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
2890
/*
 * xscvqpdp isn't using VSX_CVT_FP_TO_FP() because xscvqpdpo will be
 * added to this later.
 */
void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode,
                     ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    float_status tstat;

    /* Work on a scratch status so a local rounding-mode override does
     * not leak into env->fp_status; exception flags are merged back. */
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        /* Record-form encoding selects round-to-odd (the "o" variant). */
        tstat.float_rounding_mode = float_round_to_odd;
    }

    t.VsrD(0) = float128_to_float64(xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
    if (unlikely(float128_is_signaling_nan(xb->f128, &tstat))) {
        float_invalid_op_vxsnan(env, GETPC());
        t.VsrD(0) = float64_snan_to_qnan(t.VsrD(0));
    }
    helper_compute_fprf_float64(env, t.VsrD(0));

    *xt = t;
    do_float_check_status(env, GETPC());
}
2917
2918uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
2919{
2920    uint64_t result, sign, exp, frac;
2921
2922    float_status tstat = env->fp_status;
2923    set_float_exception_flags(0, &tstat);
2924
2925    sign = extract64(xb, 63,  1);
2926    exp  = extract64(xb, 52, 11);
2927    frac = extract64(xb,  0, 52) | 0x10000000000000ULL;
2928
2929    if (unlikely(exp == 0 && extract64(frac, 0, 52) != 0)) {
2930        /* DP denormal operand.  */
2931        /* Exponent override to DP min exp.  */
2932        exp = 1;
2933        /* Implicit bit override to 0.  */
2934        frac = deposit64(frac, 53, 1, 0);
2935    }
2936
2937    if (unlikely(exp < 897 && frac != 0)) {
2938        /* SP tiny operand.  */
2939        if (897 - exp > 63) {
2940            frac = 0;
2941        } else {
2942            /* Denormalize until exp = SP min exp.  */
2943            frac >>= (897 - exp);
2944        }
2945        /* Exponent override to SP min exp - 1.  */
2946        exp = 896;
2947    }
2948
2949    result = sign << 31;
2950    result |= extract64(exp, 10, 1) << 30;
2951    result |= extract64(exp, 0, 7) << 23;
2952    result |= extract64(frac, 29, 23);
2953
2954    /* hardware replicates result to both words of the doubleword result.  */
2955    return (result << 32) | result;
2956}
2957
2958uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
2959{
2960    float_status tstat = env->fp_status;
2961    set_float_exception_flags(0, &tstat);
2962
2963    return float32_to_float64(xb >> 32, &tstat);
2964}
2965
/*
 * VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (int32, uint32, int64 or uint64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   rnan  - resulting NaN
 *
 * Conversions truncate (round to zero).  Flags are checked per element
 * (so an invalid lane yields rnan) and accumulated across the loop
 * before the final status check.
 */
#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan)              \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
{                                                                            \
    int all_flags = env->fp_status.float_exception_flags, flags;             \
    ppc_vsr_t t = *xt;                                                       \
    int i;                                                                   \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        env->fp_status.float_exception_flags = 0;                            \
        t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status);  \
        flags = env->fp_status.float_exception_flags;                        \
        if (unlikely(flags & float_flag_invalid)) {                          \
            float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld));    \
            t.tfld = rnan;                                                   \
        }                                                                    \
        all_flags |= flags;                                                  \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    env->fp_status.float_exception_flags = all_flags;                        \
    do_float_check_status(env, GETPC());                                     \
}

VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2 * i), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2 * i), 0U)
VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
3016
/*
 * VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
 *   op    - instruction mnemonic
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (int32, uint32, int64 or uint64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   rnan  - resulting NaN
 *
 * Single-element variant used by the quad-precision conversions:
 * the source is truncated toward zero and, on an invalid conversion,
 * the instruction-specific default value 'rnan' is substituted.  The
 * remaining doublewords of the target are zeroed (t starts as { }).
 */
#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan)             \
void helper_##op(CPUPPCState *env, uint32_t opcode,                          \
                 ppc_vsr_t *xt, ppc_vsr_t *xb)                               \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
                                                                             \
    t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status);      \
    if (env->fp_status.float_exception_flags & float_flag_invalid) {         \
        float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld));        \
        t.tfld = rnan;                                                       \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, GETPC());                                     \
}

VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0),          \
                  0x8000000000000000ULL)

VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0),          \
                  0xffffffff80000000ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
3049
/*
 * VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (int32, uint32, int64 or uint64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfprf - set FPRF
 *   r2sp  - round the double-precision result to single precision
 *           (used by the scalar xscv?xdsp forms)
 */
#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp)  \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)        \
{                                                                       \
    ppc_vsr_t t = *xt;                                                  \
    int i;                                                              \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);             \
        if (r2sp) {                                                     \
            t.tfld = helper_frsp(env, t.tfld);                          \
        }                                                               \
        if (sfprf) {                                                    \
            helper_compute_fprf_float64(env, t.tfld);                   \
        }                                                               \
    }                                                                   \
                                                                        \
    *xt = t;                                                            \
    do_float_check_status(env, GETPC());                                \
}

VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2 * i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2 * i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2 * i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2 * i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
3093
/*
 * VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
 *   op    - instruction mnemonic
 *   stp   - source type (int32, uint32, int64 or uint64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *
 * Single-element variant used by the quad-precision conversions;
 * always updates FPRF for the converted result.
 */
#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld)              \
void helper_##op(CPUPPCState *env, uint32_t opcode,                     \
                 ppc_vsr_t *xt, ppc_vsr_t *xb)                          \
{                                                                       \
    ppc_vsr_t t = *xt;                                                  \
                                                                        \
    t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);                 \
    helper_compute_fprf_##ttp(env, t.tfld);                             \
                                                                        \
    *xt = t;                                                            \
    do_float_check_status(env, GETPC());                                \
}

VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
3117
/*
 * For "use current rounding mode", define a value that will not be
 * one of the existing rounding model enums.
 */
#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
  float_round_up + float_round_to_zero)

/*
 * VSX_ROUND - VSX floating point round
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   rmode - rounding mode (or FLOAT_ROUND_CURRENT to use FPSCR[RN])
 *   sfprf - set FPRF
 *
 * Rounds each element to an integral floating-point value.  A
 * signaling NaN input raises VXSNAN and is quieted rather than
 * rounded.  For instructions with an explicit rounding mode, the
 * softfloat mode is temporarily overridden and the inexact (XX)
 * flag is suppressed afterwards, per the ISA.
 */
#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf)                     \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)       \
{                                                                      \
    ppc_vsr_t t = *xt;                                                 \
    int i;                                                             \
                                                                       \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        set_float_rounding_mode(rmode, &env->fp_status);               \
    }                                                                  \
                                                                       \
    for (i = 0; i < nels; i++) {                                       \
        if (unlikely(tp##_is_signaling_nan(xb->fld,                    \
                                           &env->fp_status))) {        \
            float_invalid_op_vxsnan(env, GETPC());                     \
            t.fld = tp##_snan_to_qnan(xb->fld);                        \
        } else {                                                       \
            t.fld = tp##_round_to_int(xb->fld, &env->fp_status);       \
        }                                                              \
        if (sfprf) {                                                   \
            helper_compute_fprf_float64(env, t.fld);                   \
        }                                                              \
    }                                                                  \
                                                                       \
    /*                                                                 \
     * If this is not a "use current rounding mode" instruction,       \
     * then inhibit setting of the XX bit and restore rounding         \
     * mode from FPSCR                                                 \
     */                                                                \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        fpscr_set_rounding_mode(env);                                  \
        env->fp_status.float_exception_flags &= ~float_flag_inexact;   \
    }                                                                  \
                                                                       \
    *xt = t;                                                           \
    do_float_check_status(env, GETPC());                               \
}

VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)

VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)

VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
3188
3189uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
3190{
3191    helper_reset_fpstatus(env);
3192
3193    uint64_t xt = helper_frsp(env, xb);
3194
3195    helper_compute_fprf_float64(env, xt);
3196    do_float_check_status(env, GETPC());
3197    return xt;
3198}
3199
3200#define VSX_XXPERM(op, indexed)                                       \
3201void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                     \
3202                 ppc_vsr_t *xa, ppc_vsr_t *pcv)                       \
3203{                                                                     \
3204    ppc_vsr_t t = *xt;                                                \
3205    int i, idx;                                                       \
3206                                                                      \
3207    for (i = 0; i < 16; i++) {                                        \
3208        idx = pcv->VsrB(i) & 0x1F;                                    \
3209        if (indexed) {                                                \
3210            idx = 31 - idx;                                           \
3211        }                                                             \
3212        t.VsrB(i) = (idx <= 15) ? xa->VsrB(idx)                       \
3213                                : xt->VsrB(idx - 16);                 \
3214    }                                                                 \
3215    *xt = t;                                                          \
3216}
3217
3218VSX_XXPERM(xxperm, 0)
3219VSX_XXPERM(xxpermr, 1)
3220
3221void helper_xvxsigsp(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)
3222{
3223    ppc_vsr_t t = { };
3224    uint32_t exp, i, fraction;
3225
3226    for (i = 0; i < 4; i++) {
3227        exp = (xb->VsrW(i) >> 23) & 0xFF;
3228        fraction = xb->VsrW(i) & 0x7FFFFF;
3229        if (exp != 0 && exp != 255) {
3230            t.VsrW(i) = fraction | 0x00800000;
3231        } else {
3232            t.VsrW(i) = fraction;
3233        }
3234    }
3235    *xt = t;
3236}
3237
/*
 * VSX_TEST_DC - VSX floating point test data class
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   xbn   - VSR register number
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   tfld   - target vsr_t field (VsrD(*) or VsrW(*))
 *   fld_max - target field max
 *   scrf - set result in CR and FPCC
 *
 * DCMX bit assignment, as decoded by the extract32() calls below:
 *   bit 6: NaN         bit 5: +Infinity   bit 4: -Infinity
 *   bit 3: +Zero       bit 2: -Zero
 *   bit 1: +Denormal   bit 0: -Denormal
 * With scrf the sign/match result goes to the chosen CR field and
 * FPSCR[FPCC]; otherwise each matching lane is set to fld_max.
 */
#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf)  \
void helper_##op(CPUPPCState *env, uint32_t opcode)         \
{                                                           \
    ppc_vsr_t *xt = &env->vsr[xT(opcode)];                  \
    ppc_vsr_t *xb = &env->vsr[xbn];                         \
    ppc_vsr_t t = { };                                      \
    uint32_t i, sign, dcmx;                                 \
    uint32_t cc, match = 0;                                 \
                                                            \
    if (!scrf) {                                            \
        dcmx = DCMX_XV(opcode);                             \
    } else {                                                \
        t = *xt;                                            \
        dcmx = DCMX(opcode);                                \
    }                                                       \
                                                            \
    for (i = 0; i < nels; i++) {                            \
        sign = tp##_is_neg(xb->fld);                        \
        if (tp##_is_any_nan(xb->fld)) {                     \
            match = extract32(dcmx, 6, 1);                  \
        } else if (tp##_is_infinity(xb->fld)) {             \
            match = extract32(dcmx, 4 + !sign, 1);          \
        } else if (tp##_is_zero(xb->fld)) {                 \
            match = extract32(dcmx, 2 + !sign, 1);          \
        } else if (tp##_is_zero_or_denormal(xb->fld)) {     \
            /* zero already handled above, so: denormal */  \
            match = extract32(dcmx, 0 + !sign, 1);          \
        }                                                   \
                                                            \
        if (scrf) {                                         \
            cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT;  \
            env->fpscr &= ~FP_FPCC;                         \
            env->fpscr |= cc << FPSCR_FPCC;                 \
            env->crf[BF(opcode)] = cc;                      \
        } else {                                            \
            t.tfld = match ? fld_max : 0;                   \
        }                                                   \
        match = 0;                                          \
    }                                                       \
    if (!scrf) {                                            \
        *xt = t;                                            \
    }                                                       \
}

VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0)
VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)
3296
/*
 * xststdcsp: test data class of a double held in single-precision
 * range.  Like the macro-generated xststdc* helpers, but two
 * SP-specific twists:
 *  - a double whose biased exponent is below 0x381 (the exponent of
 *    the smallest SP normal, 2^-126) also matches the denormal class;
 *  - CR[SO] is set when the value is not exactly representable in
 *    single precision (it fails a float32 round-trip).
 */
void helper_xststdcsp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb)
{
    uint32_t dcmx, sign, exp;
    uint32_t cc, match = 0, not_sp = 0;

    dcmx = DCMX(opcode);
    exp = (xb->VsrD(0) >> 52) & 0x7FF;

    sign = float64_is_neg(xb->VsrD(0));
    if (float64_is_any_nan(xb->VsrD(0))) {
        match = extract32(dcmx, 6, 1);
    } else if (float64_is_infinity(xb->VsrD(0))) {
        match = extract32(dcmx, 4 + !sign, 1);
    } else if (float64_is_zero(xb->VsrD(0))) {
        match = extract32(dcmx, 2 + !sign, 1);
    } else if (float64_is_zero_or_denormal(xb->VsrD(0)) ||
               (exp > 0 && exp < 0x381)) {
        match = extract32(dcmx, 0 + !sign, 1);
    }

    /* Round-trip through float32; inequality means not SP-exact. */
    not_sp = !float64_eq(xb->VsrD(0),
                         float32_to_float64(
                             float64_to_float32(xb->VsrD(0), &env->fp_status),
                             &env->fp_status), &env->fp_status);

    cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[BF(opcode)] = cc;
}
3327
/*
 * xsrqpi[x]: round a quad-precision value to an integral fp value.
 * The rounding mode is decoded from R (r) and RMC (rmc):
 *   R=0, RMC=0 -> round to nearest, ties away from zero
 *   R=0, RMC=3 -> use the current FPSCR[RN] mode
 *   R=1        -> RMC selects nearest-even / toward-zero / up / down
 * EX (here 'ex', from the Rc field) distinguishes xsrqpi (ex=0,
 * inexact suppressed) from xsrqpix (ex=1, inexact reported).
 */
void helper_xsrqpi(CPUPPCState *env, uint32_t opcode,
                   ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    uint8_t r = Rrm(opcode);
    uint8_t ex = Rc(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    float_status tstat;

    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = fpscr_rn;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    /* Round on a scratch status, then merge the flags back. */
    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    t.f128 = float128_round_to_int(xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb->f128, &tstat)) {
            float_invalid_op_vxsnan(env, GETPC());
            t.f128 = float128_snan_to_qnan(t.f128);
        }
    }

    /* xsrqpi (ex == 0) must not report the inexact exception. */
    if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
        env->fp_status.float_exception_flags &= ~float_flag_inexact;
    }

    helper_compute_fprf_float128(env, t.f128);
    do_float_check_status(env, GETPC());
    *xt = t;
}
3384
/*
 * xsrqpxp: round a quad-precision value to double-extended (80-bit)
 * precision, implemented as a float128 -> floatx80 -> float128
 * round trip.  R/RMC select the rounding mode exactly as in
 * helper_xsrqpi above.
 */
void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    uint8_t r = Rrm(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    floatx80 round_res;
    float_status tstat;

    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = fpscr_rn;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    /* Narrow to 80-bit precision and widen back on a scratch status. */
    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    round_res = float128_to_floatx80(xb->f128, &tstat);
    t.f128 = floatx80_to_float128(round_res, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb->f128, &tstat)) {
            float_invalid_op_vxsnan(env, GETPC());
            t.f128 = float128_snan_to_qnan(t.f128);
        }
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, GETPC());
}
3438
/*
 * xssqrtqp: quad-precision square root.  With Rc set (xssqrtqpo)
 * the operation uses round-to-odd.  Invalid-operation handling:
 * a signaling NaN raises VXSNAN and is quieted; a quiet NaN is
 * propagated unchanged; sqrt of a negative non-zero raises VXSQRT
 * and yields the default NaN.
 */
void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode,
                     ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    float_status tstat;

    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_sqrt(xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb->f128, &tstat)) {
            float_invalid_op_vxsnan(env, GETPC());
            t.f128 = float128_snan_to_qnan(xb->f128);
        } else if (float128_is_quiet_nan(xb->f128, &tstat)) {
            t.f128 = xb->f128;
        } else if (float128_is_neg(xb->f128) && !float128_is_zero(xb->f128)) {
            float_invalid_op_vxsqrt(env, 1, GETPC());
            t.f128 = float128_default_nan(&env->fp_status);
        }
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, GETPC());
}
3472
3473void helper_xssubqp(CPUPPCState *env, uint32_t opcode,
3474                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
3475{
3476    ppc_vsr_t t = *xt;
3477    float_status tstat;
3478
3479    helper_reset_fpstatus(env);
3480
3481    tstat = env->fp_status;
3482    if (unlikely(Rc(opcode) != 0)) {
3483        tstat.float_rounding_mode = float_round_to_odd;
3484    }
3485
3486    set_float_exception_flags(0, &tstat);
3487    t.f128 = float128_sub(xa->f128, xb->f128, &tstat);
3488    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3489
3490    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3491        float_invalid_op_addsub(env, 1, GETPC(),
3492                                float128_classify(xa->f128) |
3493                                float128_classify(xb->f128));
3494    }
3495
3496    helper_compute_fprf_float128(env, t.f128);
3497    *xt = t;
3498    do_float_check_status(env, GETPC());
3499}
3500