qemu/target/ppc/fpu_helper.c
/*
 *  PowerPC floating point and SPE emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "internal.h"
#include "fpu/softfloat.h"

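/*
 * Convert a signaling NaN to the corresponding quiet NaN by setting the
 * most-significant fraction bit (the "quiet" bit) of each format: bit 111
 * for float128, bit 51 for float64, bit 22 for float32, bit 9 for float16.
 */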
static inline float128 float128_snan_to_qnan(float128 x)
{
    float128 r;

    r.high = x.high | 0x0000800000000000;
    r.low = x.low;
    return r;
}

#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
#define float16_snan_to_qnan(x) ((x) | 0x0200)

/*****************************************************************************/
/* Floating point operations helpers */
uint64_t helper_float32_to_float64(CPUPPCState *env, uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

uint32_t helper_float64_to_float32(CPUPPCState *env, uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}

static inline int ppc_float32_get_unbiased_exp(float32 f)
{
    return ((f >> 23) & 0xFF) - 127;
}

static inline int ppc_float64_get_unbiased_exp(float64 f)
{
    return ((f >> 52) & 0x7FF) - 1023;
}

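/*
 * COMPUTE_FPRF sets FPSCR[FPRF] (the C bit plus the FPCC field) to the
 * 5-bit result-class code defined by the Power ISA:
 *   0x11 quiet NaN     0x09 -infinity    0x05 +infinity
 *   0x12 -zero         0x02 +zero
 *   0x18 -denormal     0x14 +denormal
 *   0x08 -normal       0x04 +normal
 * The values below are built from the class bits ORed with a sign bit
 * (0x08 negative, 0x04 positive); sNaN inputs leave the field as 0x00
 * since the architecture leaves it undefined in that case.
 */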
#define COMPUTE_FPRF(tp)                                       \
void helper_compute_fprf_##tp(CPUPPCState *env, tp arg)        \
{                                                              \
    int isneg;                                                 \
    int fprf;                                                  \
                                                               \
    isneg = tp##_is_neg(arg);                                  \
    if (unlikely(tp##_is_any_nan(arg))) {                      \
        if (tp##_is_signaling_nan(arg, &env->fp_status)) {     \
            /* Signaling NaN: flags are undefined */           \
            fprf = 0x00;                                       \
        } else {                                               \
            /* Quiet NaN */                                    \
            fprf = 0x11;                                       \
        }                                                      \
    } else if (unlikely(tp##_is_infinity(arg))) {              \
        /* +/- infinity */                                     \
        if (isneg) {                                           \
            fprf = 0x09;                                       \
        } else {                                               \
            fprf = 0x05;                                       \
        }                                                      \
    } else {                                                   \
        if (tp##_is_zero(arg)) {                               \
            /* +/- zero */                                     \
            if (isneg) {                                       \
                fprf = 0x12;                                   \
            } else {                                           \
                fprf = 0x02;                                   \
            }                                                  \
        } else {                                               \
            if (tp##_is_zero_or_denormal(arg)) {               \
                /* Denormalized numbers */                     \
                fprf = 0x10;                                   \
            } else {                                           \
                /* Normalized numbers */                       \
                fprf = 0x00;                                   \
            }                                                  \
            if (isneg) {                                       \
                fprf |= 0x08;                                  \
            } else {                                           \
                fprf |= 0x04;                                  \
            }                                                  \
        }                                                      \
    }                                                          \
    /* We update FPSCR_FPRF */                                 \
    env->fpscr &= ~(0x1F << FPSCR_FPRF);                       \
    env->fpscr |= fprf << FPSCR_FPRF;                          \
}

COMPUTE_FPRF(float16)
COMPUTE_FPRF(float32)
COMPUTE_FPRF(float64)
COMPUTE_FPRF(float128)

/* Floating-point invalid operations exception */
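/*
 * Set the FPSCR status bit selected by "op" plus the VX and FX summary bits.
 * When the corresponding enable bit is set (and MSR[FE0]/MSR[FE1] allow it),
 * a program interrupt is raised; otherwise, for the arithmetic cases, the
 * default quiet-NaN result to be written to the target FPR is returned.
 */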
static inline __attribute__((__always_inline__))
uint64_t float_invalid_op_excp(CPUPPCState *env, int op, int set_fpcc)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        if (set_fpcc) {
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            cs->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            if (set_fpcc) {
                env->fpscr &= ~(0xF << FPSCR_FPCC);
                env->fpscr |= 0x11 << FPSCR_FPCC;
            }
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            if (set_fpcc) {
                env->fpscr &= ~(0xF << FPSCR_FPCC);
                env->fpscr |= 0x11 << FPSCR_FPCC;
            }
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            /* GETPC() works here because this is inline */
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | op, GETPC());
        }
    }
    return ret;
}

static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
                                   raddr);
        }
    }
}

static inline void float_overflow_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

static inline void float_underflow_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

static inline void float_inexact_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}

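/*
 * Propagate FPSCR[RN] to the softfloat rounding mode:
 *   00 round to nearest (ties to even), 01 round toward zero,
 *   10 round toward +infinity, 11 round toward -infinity.
 */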
static inline void fpscr_set_rounding_mode(CPUPPCState *env)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}

void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        default:
            break;
        }
    }
}

void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= FP_FX;
            if (fpscr_ve) {
                goto raise_ve;
            }
            break;
        case FPSCR_OX:
            env->fpscr |= FP_FX;
            if (fpscr_oe) {
                goto raise_oe;
            }
            break;
        case FPSCR_UX:
            env->fpscr |= FP_FX;
            if (fpscr_ue) {
                goto raise_ue;
            }
            break;
        case FPSCR_ZX:
            env->fpscr |= FP_FX;
            if (fpscr_ze) {
                goto raise_ze;
            }
            break;
        case FPSCR_XX:
            env->fpscr |= FP_FX;
            if (fpscr_xe) {
                goto raise_xe;
            }
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= FP_FX;
            if (fpscr_ve != 0) {
                goto raise_ve;
            }
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan) {
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                }
                if (fpscr_vxisi) {
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                }
                if (fpscr_vxidi) {
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                }
                if (fpscr_vxzdz) {
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                }
                if (fpscr_vximz) {
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                }
                if (fpscr_vxvc) {
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                }
                if (fpscr_vxsoft) {
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                }
                if (fpscr_vxsqrt) {
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                }
                if (fpscr_vxcvi) {
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                }
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            cs->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}

void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    target_ulong prev, new;
    int i;

    prev = env->fpscr;
    new = (target_ulong)arg;
    new &= ~0x60000000LL;
    new |= prev & 0x60000000LL;
    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xFLL << (4 * i));
            env->fpscr |= new & (0xFLL << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0) {
        env->fpscr |= 1 << FPSCR_VX;
    } else {
        env->fpscr &= ~(1 << FPSCR_VX);
    }
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode(env);
}

void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    helper_store_fpscr(env, arg, mask);
}

static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    int status = get_float_exception_flags(&env->fp_status);

    if (status & float_flag_divbyzero) {
        float_zero_divide_excp(env, raddr);
    } else if (status & float_flag_overflow) {
        float_overflow_excp(env);
    } else if (status & float_flag_underflow) {
        float_underflow_excp(env);
    } else if (status & float_flag_inexact) {
        float_inexact_excp(env);
    }

    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            raise_exception_err_ra(env, cs->exception_index,
                                   env->error_code, raddr);
        }
    }
}

static inline __attribute__((__always_inline__))
void float_check_status(CPUPPCState *env)
{
    /* GETPC() works here because this is inline */
    do_float_check_status(env, GETPC());
}

void helper_float_check_status(CPUPPCState *env)
{
    do_float_check_status(env, GETPC());
}

void helper_reset_fpstatus(CPUPPCState *env)
{
    set_float_exception_flags(0, &env->fp_status);
}

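/*
 * Scalar floating-point arithmetic.  Each helper checks the architecturally
 * invalid operand combinations (inf - inf, inf / inf, 0 / 0, inf * 0 and
 * sNaN operands) itself so that the precise FPSCR[VX*] bit is set before
 * the operation is handed to softfloat.
 */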
/* fadd - fadd. */
uint64_t helper_fadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
            /* sNaN addition */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}

/* fsub - fsub. */
uint64_t helper_fsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
            /* sNaN subtraction */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}

/* fmul - fmul. */
uint64_t helper_fmul(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
            /* sNaN multiplication */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}

/* fdiv - fdiv. */
uint64_t helper_fdiv(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) &&
                 float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
            /* sNaN division */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}


#define FPU_FCTI(op, cvt, nanval)                                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)                   \
{                                                                      \
    CPU_DoubleU farg;                                                  \
                                                                       \
    farg.ll = arg;                                                     \
    farg.ll = float64_to_##cvt(farg.d, &env->fp_status);               \
                                                                       \
    if (unlikely(env->fp_status.float_exception_flags)) {              \
        if (float64_is_any_nan(arg)) {                                 \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);      \
            if (float64_is_signaling_nan(arg, &env->fp_status)) {      \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); \
            }                                                          \
            farg.ll = nanval;                                          \
        } else if (env->fp_status.float_exception_flags &              \
                   float_flag_invalid) {                               \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);      \
        }                                                              \
        float_check_status(env);                                       \
    }                                                                  \
    return farg.ll;                                                    \
}

FPU_FCTI(fctiw, int32, 0x80000000U)
FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
FPU_FCTI(fctiwu, uint32, 0x00000000U)
FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)

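/*
 * FPU_FCFI: integer to floating-point conversions.  For the single-precision
 * variants the value is first converted to float32 and then widened back to
 * float64, so the register holds the correctly rounded single-precision
 * result in double format.
 */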
#define FPU_FCFI(op, cvtr, is_single)                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)       \
{                                                          \
    CPU_DoubleU farg;                                      \
                                                           \
    if (is_single) {                                       \
        float32 tmp = cvtr(arg, &env->fp_status);          \
        farg.d = float32_to_float64(tmp, &env->fp_status); \
    } else {                                               \
        farg.d = cvtr(arg, &env->fp_status);               \
    }                                                      \
    float_check_status(env);                               \
    return farg.ll;                                        \
}

FPU_FCFI(fcfid, int64_to_float64, 0)
FPU_FCFI(fcfids, int64_to_float32, 1)
FPU_FCFI(fcfidu, uint64_to_float64, 0)
FPU_FCFI(fcfidus, uint64_to_float32, 1)

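/*
 * do_fri: round a float64 to an integral value in floating-point format
 * using the caller-supplied rounding mode, then restore the rounding mode
 * from FPSCR.  Any inexact flag raised only by this rounding step is
 * cleared again, because the fri* instructions do not set FPSCR[XX].
 */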
static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
                              int rounding_mode)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN round */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        farg.ll = arg | 0x0008000000000000ULL;
    } else {
        int inexact = get_float_exception_flags(&env->fp_status) &
                      float_flag_inexact;
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode(env);

        /* fri* does not set FPSCR[XX] */
        if (!inexact) {
            env->fp_status.float_exception_flags &= ~float_flag_inexact;
        }
    }
    float_check_status(env);
    return farg.ll;
}

uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_ties_away);
}

uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_to_zero);
}

uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_up);
}

uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_down);
}

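/*
 * FPU_MADDSUB_UPDATE: raise the invalid-operation status bits for fused
 * multiply-add: sNaN operands (VXSNAN), infinity * zero (VXIMZ), and an
 * effective infinity - infinity once the negate-c flag has been folded
 * into the sign of the addend (VXISI).
 */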
#define FPU_MADDSUB_UPDATE(NAME, TP)                                    \
static void NAME(CPUPPCState *env, TP arg1, TP arg2, TP arg3,           \
                 unsigned int madd_flags)                               \
{                                                                       \
    if (TP##_is_signaling_nan(arg1, &env->fp_status) ||                 \
        TP##_is_signaling_nan(arg2, &env->fp_status) ||                 \
        TP##_is_signaling_nan(arg3, &env->fp_status)) {                 \
        /* sNaN operation */                                            \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);          \
    }                                                                   \
    if ((TP##_is_infinity(arg1) && TP##_is_zero(arg2)) ||               \
        (TP##_is_zero(arg1) && TP##_is_infinity(arg2))) {               \
        /* Multiplication of zero by infinity */                        \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);           \
    }                                                                   \
    if ((TP##_is_infinity(arg1) || TP##_is_infinity(arg2)) &&           \
        TP##_is_infinity(arg3)) {                                       \
        uint8_t aSign, bSign, cSign;                                    \
                                                                        \
        aSign = TP##_is_neg(arg1);                                      \
        bSign = TP##_is_neg(arg2);                                      \
        cSign = TP##_is_neg(arg3);                                      \
        if (madd_flags & float_muladd_negate_c) {                       \
            cSign ^= 1;                                                 \
        }                                                               \
        if (aSign ^ bSign ^ cSign) {                                    \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);       \
        }                                                               \
    }                                                                   \
}
FPU_MADDSUB_UPDATE(float32_maddsub_update_excp, float32)
FPU_MADDSUB_UPDATE(float64_maddsub_update_excp, float64)

#define FPU_FMADD(op, madd_flags)                                       \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg1,                   \
                     uint64_t arg2, uint64_t arg3)                      \
{                                                                       \
    uint32_t flags;                                                     \
    float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags,          \
                                 &env->fp_status);                      \
    flags = get_float_exception_flags(&env->fp_status);                 \
    if (flags) {                                                        \
        if (flags & float_flag_invalid) {                               \
            float64_maddsub_update_excp(env, arg1, arg2, arg3,          \
                                        madd_flags);                    \
        }                                                               \
        float_check_status(env);                                        \
    }                                                                   \
    return ret;                                                         \
}

#define MADD_FLGS 0
#define MSUB_FLGS float_muladd_negate_c
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)

FPU_FMADD(fmadd, MADD_FLGS)
FPU_FMADD(fnmadd, NMADD_FLGS)
FPU_FMADD(fmsub, MSUB_FLGS)
FPU_FMADD(fnmsub, NMSUB_FLGS)

/* frsp - frsp. */
uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN round to single precision */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}

/* fsqrt - fsqrt. */
uint64_t helper_fsqrt(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_any_nan(farg.d))) {
        if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
            /* sNaN square root */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            farg.ll = float64_snan_to_qnan(farg.ll);
        }
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fre - fre. */
uint64_t helper_fre(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN reciprocal */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    return farg.d;
}

/* fres - fres. */
uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN reciprocal */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}

/* frsqrte - frsqrte. */
uint64_t helper_frsqrte(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_any_nan(farg.d))) {
        if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
            /* sNaN reciprocal square root */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            farg.ll = float64_snan_to_qnan(farg.ll);
        }
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    }

    return farg.ll;
}

/* fsel - fsel. */
uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                     uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
        !float64_is_any_nan(farg1.d)) {
        return arg2;
    } else {
        return arg3;
    }
}

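/*
 * ftdiv/ftsqrt return a 4-bit CR-style field with bit 3 always set; bit 2
 * (fg) and bit 1 (fe) are set when the operands make a software
 * divide/square-root estimate unsafe (infinities, zeros, NaNs, extreme
 * exponents or denormal inputs), per the tests coded below.
 */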
uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(fra) ||
                 float64_is_infinity(frb) ||
                 float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_a = ppc_float64_get_unbiased_exp(fra);
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(fra) ||
                     float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if ((e_b <= -1022) || (e_b >= 1021)) {
            fe_flag = 1;
        } else if (!float64_is_zero(fra) &&
                   (((e_a - e_b) >= 1023) ||
                    ((e_a - e_b) <= -1021) ||
                    (e_a <= -970))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* so must be denormalized.                      */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}

uint32_t helper_ftsqrt(uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_zero(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_neg(frb))) {
            fe_flag = 1;
        } else if (!float64_is_zero(frb) && (e_b <= (-1022 + 52))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* therefore must be denormalized.               */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}

void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
        /* sNaN comparison */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
}

void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
            /* sNaN comparison */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC, 1);
        } else {
            /* qNaN comparison */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 1);
        }
    }
}

/* Single-precision floating-point conversions */
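/*
 * The SPE helpers below use env->vec_status rather than env->fp_status and
 * deliberately deviate from IEEE 754: the float-to-integer conversions
 * return 0 for NaN inputs instead of raising the invalid-operation
 * exception.
 */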
static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 requires */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 requires */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32(u.f, &env->vec_status);
}

static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 requires */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 requires */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 requires */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 requires */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}

#define HELPER_SPE_SINGLE_CONV(name)                              \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t val)       \
    {                                                             \
        return e##name(env, val);                                 \
    }
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);

#define HELPER_SPE_VECTOR_CONV(name)                            \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val)    \
    {                                                           \
        return ((uint64_t)e##name(env, val >> 32) << 32) |      \
            (uint64_t)e##name(env, val);                        \
    }
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);

/* Single-precision floating-point arithmetic */
static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

#define HELPER_SPE_SINGLE_ARITH(name)                                   \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                   \
        return e##name(env, op1, op2);                                  \
    }
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);

#define HELPER_SPE_VECTOR_ARITH(name)                                   \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                   \
        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |   \
            (uint64_t)e##name(env, op1, op2);                           \
    }
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);

/* Single-precision floating-point comparisons */
static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmplt(env, op1, op2);
}

static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpgt(env, op1, op2);
}

static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpeq(env, op1, op2);
}

#define HELPER_SINGLE_SPE_CMP(name)                                     \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                   \
        return e##name(env, op1, op2);                                  \
    }
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);

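/*
 * Merge the comparison results of the two SPE vector elements into one CR
 * field: bit 3 = high element, bit 2 = low element, bit 1 = either element,
 * bit 0 = both elements.
 */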
static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}

#define HELPER_VECTOR_SPE_CMP(name)                                     \
    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                   \
        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),          \
                           e##name(env, op1, op2));                     \
    }
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);

/* Double-precision floating-point conversion */
uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 requires */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 requires */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 requires */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 requires */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 requires */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 requires */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 requires */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 requires */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}

1587/* Double precision fixed-point arithmetic */
1588uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
1589{
1590    CPU_DoubleU u1, u2;
1591
1592    u1.ll = op1;
1593    u2.ll = op2;
1594    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
1595    return u1.ll;
1596}
1597
1598uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
1599{
1600    CPU_DoubleU u1, u2;
1601
1602    u1.ll = op1;
1603    u2.ll = op2;
1604    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
1605    return u1.ll;
1606}
1607
1608uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
1609{
1610    CPU_DoubleU u1, u2;
1611
1612    u1.ll = op1;
1613    u2.ll = op2;
1614    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
1615    return u1.ll;
1616}
1617
1618uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
1619{
1620    CPU_DoubleU u1, u2;
1621
1622    u1.ll = op1;
1623    u2.ll = op2;
1624    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
1625    return u1.ll;
1626}
1627
1628/* Double precision floating point comparison helpers */
1629uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1630{
1631    CPU_DoubleU u1, u2;
1632
1633    u1.ll = op1;
1634    u2.ll = op2;
1635    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
1636}
1637
1638uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1639{
1640    CPU_DoubleU u1, u2;
1641
1642    u1.ll = op1;
1643    u2.ll = op2;
1644    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
1645}
1646
1647uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
1648{
1649    CPU_DoubleU u1, u2;
1650
1651    u1.ll = op1;
1652    u2.ll = op2;
1653    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
1654}
1655
1656uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1657{
1658    /* XXX: TODO: test special values (NaN, infinities, ...) */
1659    return helper_efdtstlt(env, op1, op2);
1660}
1661
1662uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1663{
1664    /* XXX: TODO: test special values (NaN, infinities, ...) */
1665    return helper_efdtstgt(env, op1, op2);
1666}
1667
1668uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
1669{
1670    /* XXX: TODO: test special values (NaN, infinities, ...) */
1671    return helper_efdtsteq(env, op1, op2);
1672}
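    /*
     * The efdtst and efdcmp helpers above return either 4 or 0: 4 is the
     * mask of the CRF_GT bit position, the single CR-field bit in which the
     * SPE double-precision compare instructions report their result.
     */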
1673
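    /*
     * Identity conversion: lets type-generic macro expansions of the form
     * tp##_to_float64 compile unchanged when tp is already float64.
     */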
1674#define float64_to_float64(x, env) x
1675
1676
1677/* VSX_ADD_SUB - VSX floating point add/subtract
1678 *   name  - instruction mnemonic
1679 *   op    - operation (add or sub)
1680 *   nels  - number of elements (1, 2 or 4)
1681 *   tp    - type (float32 or float64)
1682 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1683 *   sfprf - set FPRF
     *   r2sp  - round intermediate result to single-precision
1684 */
1685#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp)                    \
1686void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
1687{                                                                            \
1688    ppc_vsr_t xt, xa, xb;                                                    \
1689    int i;                                                                   \
1690                                                                             \
1691    getVSR(xA(opcode), &xa, env);                                            \
1692    getVSR(xB(opcode), &xb, env);                                            \
1693    getVSR(xT(opcode), &xt, env);                                            \
1694    helper_reset_fpstatus(env);                                              \
1695                                                                             \
1696    for (i = 0; i < nels; i++) {                                             \
1697        float_status tstat = env->fp_status;                                 \
1698        set_float_exception_flags(0, &tstat);                                \
1699        xt.fld = tp##_##op(xa.fld, xb.fld, &tstat);                          \
1700        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
1701                                                                             \
1702        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
1703            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {      \
1704                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf);    \
1705            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
1706                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
1707                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
1708            }                                                                \
1709        }                                                                    \
1710                                                                             \
1711        if (r2sp) {                                                          \
1712            xt.fld = helper_frsp(env, xt.fld);                               \
1713        }                                                                    \
1714                                                                             \
1715        if (sfprf) {                                                         \
1716            helper_compute_fprf_float64(env, xt.fld);                        \
1717        }                                                                    \
1718    }                                                                        \
1719    putVSR(xT(opcode), &xt, env);                                            \
1720    float_check_status(env);                                                 \
1721}
1722
1723VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
1724VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
1725VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
1726VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
1727VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
1728VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
1729VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
1730VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
1731
1732void helper_xsaddqp(CPUPPCState *env, uint32_t opcode)
1733{
1734    ppc_vsr_t xt, xa, xb;
1735    float_status tstat;
1736
1737    getVSR(rA(opcode) + 32, &xa, env);
1738    getVSR(rB(opcode) + 32, &xb, env);
1739    getVSR(rD(opcode) + 32, &xt, env);
1740    helper_reset_fpstatus(env);
1741
1742    tstat = env->fp_status;
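        /* Rc=1 selects the round-to-odd form of the instruction (xsaddqpo). */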
1743    if (unlikely(Rc(opcode) != 0)) {
1744        tstat.float_rounding_mode = float_round_to_odd;
1745    }
1746
1747    set_float_exception_flags(0, &tstat);
1748    xt.f128 = float128_add(xa.f128, xb.f128, &tstat);
1749    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
1750
1751    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
1752        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
1753            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
1754        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
1755                   float128_is_signaling_nan(xb.f128, &tstat)) {
1756            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
1757        }
1758    }
1759
1760    helper_compute_fprf_float128(env, xt.f128);
1761
1762    putVSR(rD(opcode) + 32, &xt, env);
1763    float_check_status(env);
1764}
1765
1766/* VSX_MUL - VSX floating point multiply
1767 *   op    - instruction mnemonic
1768 *   nels  - number of elements (1, 2 or 4)
1769 *   tp    - type (float32 or float64)
1770 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1771 *   sfprf - set FPRF
     *   r2sp  - round intermediate result to single-precision
1772 */
1773#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp)                              \
1774void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
1775{                                                                            \
1776    ppc_vsr_t xt, xa, xb;                                                    \
1777    int i;                                                                   \
1778                                                                             \
1779    getVSR(xA(opcode), &xa, env);                                            \
1780    getVSR(xB(opcode), &xb, env);                                            \
1781    getVSR(xT(opcode), &xt, env);                                            \
1782    helper_reset_fpstatus(env);                                              \
1783                                                                             \
1784    for (i = 0; i < nels; i++) {                                             \
1785        float_status tstat = env->fp_status;                                 \
1786        set_float_exception_flags(0, &tstat);                                \
1787        xt.fld = tp##_mul(xa.fld, xb.fld, &tstat);                           \
1788        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
1789                                                                             \
1790        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
1791            if ((tp##_is_infinity(xa.fld) && tp##_is_zero(xb.fld)) ||        \
1792                (tp##_is_infinity(xb.fld) && tp##_is_zero(xa.fld))) {        \
1793                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, sfprf);    \
1794            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
1795                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
1796                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
1797            }                                                                \
1798        }                                                                    \
1799                                                                             \
1800        if (r2sp) {                                                          \
1801            xt.fld = helper_frsp(env, xt.fld);                               \
1802        }                                                                    \
1803                                                                             \
1804        if (sfprf) {                                                         \
1805            helper_compute_fprf_float64(env, xt.fld);                        \
1806        }                                                                    \
1807    }                                                                        \
1808                                                                             \
1809    putVSR(xT(opcode), &xt, env);                                            \
1810    float_check_status(env);                                                 \
1811}
1812
1813VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
1814VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
1815VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
1816VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
1817
1818void helper_xsmulqp(CPUPPCState *env, uint32_t opcode)
1819{
1820    ppc_vsr_t xt, xa, xb;
1821    float_status tstat;
1822
1823    getVSR(rA(opcode) + 32, &xa, env);
1824    getVSR(rB(opcode) + 32, &xb, env);
1825    getVSR(rD(opcode) + 32, &xt, env);
1826
1827    helper_reset_fpstatus(env);
1828    tstat = env->fp_status;
1829    if (unlikely(Rc(opcode) != 0)) {
1830        tstat.float_rounding_mode = float_round_to_odd;
1831    }
1832
1833    set_float_exception_flags(0, &tstat);
1834    xt.f128 = float128_mul(xa.f128, xb.f128, &tstat);
1835    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
1836
1837    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
1838        if ((float128_is_infinity(xa.f128) && float128_is_zero(xb.f128)) ||
1839            (float128_is_infinity(xb.f128) && float128_is_zero(xa.f128))) {
1840            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
1841        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
1842                   float128_is_signaling_nan(xb.f128, &tstat)) {
1843            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
1844        }
1845    }
1846    helper_compute_fprf_float128(env, xt.f128);
1847
1848    putVSR(rD(opcode) + 32, &xt, env);
1849    float_check_status(env);
1850}
1851
1852/* VSX_DIV - VSX floating point divide
1853 *   op    - instruction mnemonic
1854 *   nels  - number of elements (1, 2 or 4)
1855 *   tp    - type (float32 or float64)
1856 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1857 *   sfprf - set FPRF
     *   r2sp  - round intermediate result to single-precision
1858 */
1859#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp)                               \
1860void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
1861{                                                                             \
1862    ppc_vsr_t xt, xa, xb;                                                     \
1863    int i;                                                                    \
1864                                                                              \
1865    getVSR(xA(opcode), &xa, env);                                             \
1866    getVSR(xB(opcode), &xb, env);                                             \
1867    getVSR(xT(opcode), &xt, env);                                             \
1868    helper_reset_fpstatus(env);                                               \
1869                                                                              \
1870    for (i = 0; i < nels; i++) {                                              \
1871        float_status tstat = env->fp_status;                                  \
1872        set_float_exception_flags(0, &tstat);                                 \
1873        xt.fld = tp##_div(xa.fld, xb.fld, &tstat);                            \
1874        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
1875                                                                              \
1876        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
1877            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {       \
1878                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, sfprf);     \
1879            } else if (tp##_is_zero(xa.fld) &&                                \
1880                tp##_is_zero(xb.fld)) {                                       \
1881                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, sfprf);     \
1882            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||               \
1883                tp##_is_signaling_nan(xb.fld, &tstat)) {                      \
1884                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);    \
1885            }                                                                 \
1886        }                                                                     \
1887                                                                              \
1888        if (r2sp) {                                                           \
1889            xt.fld = helper_frsp(env, xt.fld);                                \
1890        }                                                                     \
1891                                                                              \
1892        if (sfprf) {                                                          \
1893            helper_compute_fprf_float64(env, xt.fld);                         \
1894        }                                                                     \
1895    }                                                                         \
1896                                                                              \
1897    putVSR(xT(opcode), &xt, env);                                             \
1898    float_check_status(env);                                                  \
1899}
1900
1901VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
1902VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
1903VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
1904VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
1905
1906void helper_xsdivqp(CPUPPCState *env, uint32_t opcode)
1907{
1908    ppc_vsr_t xt, xa, xb;
1909    float_status tstat;
1910
1911    getVSR(rA(opcode) + 32, &xa, env);
1912    getVSR(rB(opcode) + 32, &xb, env);
1913    getVSR(rD(opcode) + 32, &xt, env);
1914
1915    helper_reset_fpstatus(env);
1916    tstat = env->fp_status;
1917    if (unlikely(Rc(opcode) != 0)) {
1918        tstat.float_rounding_mode = float_round_to_odd;
1919    }
1920
1921    set_float_exception_flags(0, &tstat);
1922    xt.f128 = float128_div(xa.f128, xb.f128, &tstat);
1923    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
1924
1925    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
1926        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
1927            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
1928        } else if (float128_is_zero(xa.f128) &&
1929            float128_is_zero(xb.f128)) {
1930            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
1931        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
1932            float128_is_signaling_nan(xb.f128, &tstat)) {
1933            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
1934        }
1935    }
1936
1937    helper_compute_fprf_float128(env, xt.f128);
1938    putVSR(rD(opcode) + 32, &xt, env);
1939    float_check_status(env);
1940}
1941
1942/* VSX_RE  - VSX floating point reciprocal estimate
1943 *   op    - instruction mnemonic
1944 *   nels  - number of elements (1, 2 or 4)
1945 *   tp    - type (float32 or float64)
1946 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1947 *   sfprf - set FPRF
     *   r2sp  - round intermediate result to single-precision
1948 */
1949#define VSX_RE(op, nels, tp, fld, sfprf, r2sp)                                \
1950void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
1951{                                                                             \
1952    ppc_vsr_t xt, xb;                                                         \
1953    int i;                                                                    \
1954                                                                              \
1955    getVSR(xB(opcode), &xb, env);                                             \
1956    getVSR(xT(opcode), &xt, env);                                             \
1957    helper_reset_fpstatus(env);                                               \
1958                                                                              \
1959    for (i = 0; i < nels; i++) {                                              \
1960        if (unlikely(tp##_is_signaling_nan(xb.fld, &env->fp_status))) {       \
1961                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);    \
1962        }                                                                     \
1963        xt.fld = tp##_div(tp##_one, xb.fld, &env->fp_status);                 \
1964                                                                              \
1965        if (r2sp) {                                                           \
1966            xt.fld = helper_frsp(env, xt.fld);                                \
1967        }                                                                     \
1968                                                                              \
1969        if (sfprf) {                                                          \
1970            helper_compute_fprf_float64(env, xt.fld);                         \
1971        }                                                                     \
1972    }                                                                         \
1973                                                                              \
1974    putVSR(xT(opcode), &xt, env);                                             \
1975    float_check_status(env);                                                  \
1976}
1977
1978VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
1979VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
1980VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
1981VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
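    /*
     * Note: unlike hardware, which may return a reduced-precision estimate,
     * the reciprocal estimates above (and the reciprocal square root
     * estimates below) are computed as exact softfloat operations:
     * 1.0 / x and 1.0 / sqrt(x) respectively.
     */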
1982
1983/* VSX_SQRT - VSX floating point square root
1984 *   op    - instruction mnemonic
1985 *   nels  - number of elements (1, 2 or 4)
1986 *   tp    - type (float32 or float64)
1987 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1988 *   sfprf - set FPRF
     *   r2sp  - round intermediate result to single-precision
1989 */
1990#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp)                             \
1991void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
1992{                                                                            \
1993    ppc_vsr_t xt, xb;                                                        \
1994    int i;                                                                   \
1995                                                                             \
1996    getVSR(xB(opcode), &xb, env);                                            \
1997    getVSR(xT(opcode), &xt, env);                                            \
1998    helper_reset_fpstatus(env);                                              \
1999                                                                             \
2000    for (i = 0; i < nels; i++) {                                             \
2001        float_status tstat = env->fp_status;                                 \
2002        set_float_exception_flags(0, &tstat);                                \
2003        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
2004        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
2005                                                                             \
2006        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
2007            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
2008                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
2009            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
2010                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
2011            }                                                                \
2012        }                                                                    \
2013                                                                             \
2014        if (r2sp) {                                                          \
2015            xt.fld = helper_frsp(env, xt.fld);                               \
2016        }                                                                    \
2017                                                                             \
2018        if (sfprf) {                                                         \
2019            helper_compute_fprf_float64(env, xt.fld);                        \
2020        }                                                                    \
2021    }                                                                        \
2022                                                                             \
2023    putVSR(xT(opcode), &xt, env);                                            \
2024    float_check_status(env);                                                 \
2025}
2026
2027VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
2028VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
2029VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
2030VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
2031
2032/* VSX_RSQRTE - VSX floating point reciprocal square root estimate
2033 *   op    - instruction mnemonic
2034 *   nels  - number of elements (1, 2 or 4)
2035 *   tp    - type (float32 or float64)
2036 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2037 *   sfprf - set FPRF
     *   r2sp  - round intermediate result to single-precision
2038 */
2039#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp)                           \
2040void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
2041{                                                                            \
2042    ppc_vsr_t xt, xb;                                                        \
2043    int i;                                                                   \
2044                                                                             \
2045    getVSR(xB(opcode), &xb, env);                                            \
2046    getVSR(xT(opcode), &xt, env);                                            \
2047    helper_reset_fpstatus(env);                                              \
2048                                                                             \
2049    for (i = 0; i < nels; i++) {                                             \
2050        float_status tstat = env->fp_status;                                 \
2051        set_float_exception_flags(0, &tstat);                                \
2052        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
2053        xt.fld = tp##_div(tp##_one, xt.fld, &tstat);                         \
2054        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
2055                                                                             \
2056        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
2057            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
2058                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
2059            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
2060                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
2061            }                                                                \
2062        }                                                                    \
2063                                                                             \
2064        if (r2sp) {                                                          \
2065            xt.fld = helper_frsp(env, xt.fld);                               \
2066        }                                                                    \
2067                                                                             \
2068        if (sfprf) {                                                         \
2069            helper_compute_fprf_float64(env, xt.fld);                        \
2070        }                                                                    \
2071    }                                                                        \
2072                                                                             \
2073    putVSR(xT(opcode), &xt, env);                                            \
2074    float_check_status(env);                                                 \
2075}
2076
2077VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
2078VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
2079VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
2080VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
2081
2082/* VSX_TDIV - VSX floating point test for divide
2083 *   op    - instruction mnemonic
2084 *   nels  - number of elements (1, 2 or 4)
2085 *   tp    - type (float32 or float64)
2086 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2087 *   emin  - minimum unbiased exponent
2088 *   emax  - maximum unbiased exponent
2089 *   nbits - number of fraction bits
2090 */
2091#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                  \
2092void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
2093{                                                                       \
2094    ppc_vsr_t xa, xb;                                                   \
2095    int i;                                                              \
2096    int fe_flag = 0;                                                    \
2097    int fg_flag = 0;                                                    \
2098                                                                        \
2099    getVSR(xA(opcode), &xa, env);                                       \
2100    getVSR(xB(opcode), &xb, env);                                       \
2101                                                                        \
2102    for (i = 0; i < nels; i++) {                                        \
2103        if (unlikely(tp##_is_infinity(xa.fld) ||                        \
2104                     tp##_is_infinity(xb.fld) ||                        \
2105                     tp##_is_zero(xb.fld))) {                           \
2106            fe_flag = 1;                                                \
2107            fg_flag = 1;                                                \
2108        } else {                                                        \
2109            int e_a = ppc_##tp##_get_unbiased_exp(xa.fld);              \
2110            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);              \
2111                                                                        \
2112            if (unlikely(tp##_is_any_nan(xa.fld) ||                     \
2113                         tp##_is_any_nan(xb.fld))) {                    \
2114                fe_flag = 1;                                            \
2115            } else if ((e_b <= emin) || (e_b >= (emax-2))) {            \
2116                fe_flag = 1;                                            \
2117            } else if (!tp##_is_zero(xa.fld) &&                         \
2118                       (((e_a - e_b) >= emax) ||                        \
2119                        ((e_a - e_b) <= (emin+1)) ||                    \
2120                         (e_a <= (emin+nbits)))) {                      \
2121                fe_flag = 1;                                            \
2122            }                                                           \
2123                                                                        \
2124            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {           \
2125                /* XB is not zero because of the above check and */     \
2126                /* so must be denormalized.                      */     \
2127                fg_flag = 1;                                            \
2128            }                                                           \
2129        }                                                               \
2130    }                                                                   \
2131                                                                        \
2132    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
2133}
2134
2135VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
2136VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
2137VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
2138
2139/* VSX_TSQRT - VSX floating point test for square root
2140 *   op    - instruction mnemonic
2141 *   nels  - number of elements (1, 2 or 4)
2142 *   tp    - type (float32 or float64)
2143 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2144 *   emin  - minimum unbiased exponent
2146 *   nbits - number of fraction bits
2147 */
2148#define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                       \
2149void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
2150{                                                                       \
2151    ppc_vsr_t xa, xb;                                                   \
2152    int i;                                                              \
2153    int fe_flag = 0;                                                    \
2154    int fg_flag = 0;                                                    \
2155                                                                        \
2156    getVSR(xA(opcode), &xa, env);                                       \
2157    getVSR(xB(opcode), &xb, env);                                       \
2158                                                                        \
2159    for (i = 0; i < nels; i++) {                                        \
2160        if (unlikely(tp##_is_infinity(xb.fld) ||                        \
2161                     tp##_is_zero(xb.fld))) {                           \
2162            fe_flag = 1;                                                \
2163            fg_flag = 1;                                                \
2164        } else {                                                        \
2165            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);              \
2166                                                                        \
2167            if (unlikely(tp##_is_any_nan(xb.fld))) {                    \
2168                fe_flag = 1;                                            \
2169            } else if (unlikely(tp##_is_zero(xb.fld))) {                \
2170                fe_flag = 1;                                            \
2171            } else if (unlikely(tp##_is_neg(xb.fld))) {                 \
2172                fe_flag = 1;                                            \
2173            } else if (!tp##_is_zero(xb.fld) &&                         \
2174                      (e_b <= (emin+nbits))) {                          \
2175                fe_flag = 1;                                            \
2176            }                                                           \
2177                                                                        \
2178            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {           \
2179                /* XB is not zero because of the above check and */     \
2180                /* therefore must be denormalized.               */     \
2181                fg_flag = 1;                                            \
2182            }                                                           \
2183        }                                                               \
2184    }                                                                   \
2185                                                                        \
2186    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
2187}
2188
2189VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
2190VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
2191VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
2192
2193/* VSX_MADD - VSX floating point multiply/add variations
2194 *   op    - instruction mnemonic
2195 *   nels  - number of elements (1, 2 or 4)
2196 *   tp    - type (float32 or float64)
2197 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2198 *   maddflgs - flags for the float*muladd routine that control the
2199 *           various forms (madd, msub, nmadd, nmsub)
2200 *   afrm  - A form (1=A, 0=M)
2201 *   sfprf - set FPRF
     *   r2sp  - round intermediate result to single-precision
2202 */
2203#define VSX_MADD(op, nels, tp, fld, maddflgs, afrm, sfprf, r2sp)              \
2204void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
2205{                                                                             \
2206    ppc_vsr_t xt_in, xa, xb, xt_out;                                          \
2207    ppc_vsr_t *b, *c;                                                         \
2208    int i;                                                                    \
2209                                                                              \
2210    if (afrm) { /* AxB + T */                                                 \
2211        b = &xb;                                                              \
2212        c = &xt_in;                                                           \
2213    } else { /* AxT + B */                                                    \
2214        b = &xt_in;                                                           \
2215        c = &xb;                                                              \
2216    }                                                                         \
2217                                                                              \
2218    getVSR(xA(opcode), &xa, env);                                             \
2219    getVSR(xB(opcode), &xb, env);                                             \
2220    getVSR(xT(opcode), &xt_in, env);                                          \
2221                                                                              \
2222    xt_out = xt_in;                                                           \
2223                                                                              \
2224    helper_reset_fpstatus(env);                                               \
2225                                                                              \
2226    for (i = 0; i < nels; i++) {                                              \
2227        float_status tstat = env->fp_status;                                  \
2228        set_float_exception_flags(0, &tstat);                                 \
2229        if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
2230            /* Avoid double rounding errors by rounding the intermediate */   \
2231            /* result to odd: truncate, then OR the inexact (sticky)     */   \
                /* flag into the least-significant bit of the result.        */   \
2232            set_float_rounding_mode(float_round_to_zero, &tstat);             \
2233            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                  \
2234                                       maddflgs, &tstat);                     \
2235            xt_out.fld |= (get_float_exception_flags(&tstat) &                \
2236                              float_flag_inexact) != 0;                       \
2237        } else {                                                              \
2238            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                  \
2239                                        maddflgs, &tstat);                    \
2240        }                                                                     \
2241        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
2242                                                                              \
2243        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
2244            tp##_maddsub_update_excp(env, xa.fld, b->fld, c->fld, maddflgs);  \
2245        }                                                                     \
2246                                                                              \
2247        if (r2sp) {                                                           \
2248            xt_out.fld = helper_frsp(env, xt_out.fld);                        \
2249        }                                                                     \
2250                                                                              \
2251        if (sfprf) {                                                          \
2252            helper_compute_fprf_float64(env, xt_out.fld);                     \
2253        }                                                                     \
2254    }                                                                         \
2255    putVSR(xT(opcode), &xt_out, env);                                         \
2256    float_check_status(env);                                                  \
2257}
2258
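    /*
     * The maddflgs arguments below (MADD_FLGS, MSUB_FLGS, NMADD_FLGS and
     * NMSUB_FLGS, see internal.h) are combinations of the softfloat
     * float_muladd_negate_* flags that select the msub/nmadd/nmsub forms.
     */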
2259VSX_MADD(xsmaddadp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 0)
2260VSX_MADD(xsmaddmdp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 0)
2261VSX_MADD(xsmsubadp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 0)
2262VSX_MADD(xsmsubmdp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 0)
2263VSX_MADD(xsnmaddadp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 0)
2264VSX_MADD(xsnmaddmdp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 0)
2265VSX_MADD(xsnmsubadp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 0)
2266VSX_MADD(xsnmsubmdp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 0)
2267
2268VSX_MADD(xsmaddasp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 1)
2269VSX_MADD(xsmaddmsp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 1)
2270VSX_MADD(xsmsubasp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 1)
2271VSX_MADD(xsmsubmsp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 1)
2272VSX_MADD(xsnmaddasp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 1)
2273VSX_MADD(xsnmaddmsp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 1)
2274VSX_MADD(xsnmsubasp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 1)
2275VSX_MADD(xsnmsubmsp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 1)
2276
2277VSX_MADD(xvmaddadp, 2, float64, VsrD(i), MADD_FLGS, 1, 0, 0)
2278VSX_MADD(xvmaddmdp, 2, float64, VsrD(i), MADD_FLGS, 0, 0, 0)
2279VSX_MADD(xvmsubadp, 2, float64, VsrD(i), MSUB_FLGS, 1, 0, 0)
2280VSX_MADD(xvmsubmdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0, 0)
2281VSX_MADD(xvnmaddadp, 2, float64, VsrD(i), NMADD_FLGS, 1, 0, 0)
2282VSX_MADD(xvnmaddmdp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0, 0)
2283VSX_MADD(xvnmsubadp, 2, float64, VsrD(i), NMSUB_FLGS, 1, 0, 0)
2284VSX_MADD(xvnmsubmdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0, 0)
2285
2286VSX_MADD(xvmaddasp, 4, float32, VsrW(i), MADD_FLGS, 1, 0, 0)
2287VSX_MADD(xvmaddmsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0, 0)
2288VSX_MADD(xvmsubasp, 4, float32, VsrW(i), MSUB_FLGS, 1, 0, 0)
2289VSX_MADD(xvmsubmsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0, 0)
2290VSX_MADD(xvnmaddasp, 4, float32, VsrW(i), NMADD_FLGS, 1, 0, 0)
2291VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
2292VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
2293VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
2294
2295/* VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
2296 *   op    - instruction mnemonic
2297 *   cmp   - comparison operation
2298 *   exp   - expected result of comparison
2299 *   svxvc - set VXVC bit
2300 */
2301#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc)                                \
2302void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
2303{                                                                             \
2304    ppc_vsr_t xt, xa, xb;                                                     \
2305    bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false;            \
2306                                                                              \
2307    getVSR(xA(opcode), &xa, env);                                             \
2308    getVSR(xB(opcode), &xb, env);                                             \
2309    getVSR(xT(opcode), &xt, env);                                             \
2310                                                                              \
2311    if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||              \
2312        float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {              \
2313        vxsnan_flag = true;                                                   \
2314        if (fpscr_ve == 0 && svxvc) {                                         \
2315            vxvc_flag = true;                                                 \
2316        }                                                                     \
2317    } else if (svxvc) {                                                       \
2318        vxvc_flag = float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) ||      \
2319            float64_is_quiet_nan(xb.VsrD(0), &env->fp_status);                \
2320    }                                                                         \
2321    if (vxsnan_flag) {                                                        \
2322        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);                \
2323    }                                                                         \
2324    if (vxvc_flag) {                                                          \
2325        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);                  \
2326    }                                                                         \
2327    vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag);                        \
2328                                                                              \
2329    if (!vex_flag) {                                                          \
2330        if (float64_##cmp(xb.VsrD(0), xa.VsrD(0), &env->fp_status) == exp) {  \
2331            xt.VsrD(0) = -1;                                                  \
2332            xt.VsrD(1) = 0;                                                   \
2333        } else {                                                              \
2334            xt.VsrD(0) = 0;                                                   \
2335            xt.VsrD(1) = 0;                                                   \
2336        }                                                                     \
2337    }                                                                         \
2338    putVSR(xT(opcode), &xt, env);                                             \
2339    helper_float_check_status(env);                                           \
2340}
2341
2342VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
2343VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
2344VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
2345VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)
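    /*
     * Note the swapped operand order in the macro above: the "greater"
     * compares are expressed through it, e.g. xscmpgtdp tests xb < xa and
     * xscmpgedp tests xb <= xa.
     */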
2346
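    /*
     * xscmpexpdp / xscmpexpqp - compare only the biased exponent fields of
     * the two source operands; a NaN in either operand makes the result
     * unordered (CRF_SO).
     */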
2347void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode)
2348{
2349    ppc_vsr_t xa, xb;
2350    int64_t exp_a, exp_b;
2351    uint32_t cc;
2352
2353    getVSR(xA(opcode), &xa, env);
2354    getVSR(xB(opcode), &xb, env);
2355
2356    exp_a = extract64(xa.VsrD(0), 52, 11);
2357    exp_b = extract64(xb.VsrD(0), 52, 11);
2358
2359    if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||
2360                 float64_is_any_nan(xb.VsrD(0)))) {
2361        cc = CRF_SO;
2362    } else {
2363        if (exp_a < exp_b) {
2364            cc = CRF_LT;
2365        } else if (exp_a > exp_b) {
2366            cc = CRF_GT;
2367        } else {
2368            cc = CRF_EQ;
2369        }
2370    }
2371
2372    env->fpscr &= ~(0x0F << FPSCR_FPRF);
2373    env->fpscr |= cc << FPSCR_FPRF;
2374    env->crf[BF(opcode)] = cc;
2375
2376    helper_float_check_status(env);
2377}
2378
2379void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode)
2380{
2381    ppc_vsr_t xa, xb;
2382    int64_t exp_a, exp_b;
2383    uint32_t cc;
2384
2385    getVSR(rA(opcode) + 32, &xa, env);
2386    getVSR(rB(opcode) + 32, &xb, env);
2387
2388    exp_a = extract64(xa.VsrD(0), 48, 15);
2389    exp_b = extract64(xb.VsrD(0), 48, 15);
2390
2391    if (unlikely(float128_is_any_nan(xa.f128) ||
2392                 float128_is_any_nan(xb.f128))) {
2393        cc = CRF_SO;
2394    } else {
2395        if (exp_a < exp_b) {
2396            cc = CRF_LT;
2397        } else if (exp_a > exp_b) {
2398            cc = CRF_GT;
2399        } else {
2400            cc = CRF_EQ;
2401        }
2402    }
2403
2404    env->fpscr &= ~(0x0F << FPSCR_FPRF);
2405    env->fpscr |= cc << FPSCR_FPRF;
2406    env->crf[BF(opcode)] = cc;
2407
2408    helper_float_check_status(env);
2409}
2410
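    /* VSX_SCALAR_CMP - VSX scalar floating point compare (double precision)
     *   op      - instruction mnemonic
     *   ordered - 1 for the ordered compare: a NaN operand then also raises
     *             VXVC (for an SNaN, only when FPSCR[VE] is clear)
     */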
2411#define VSX_SCALAR_CMP(op, ordered)                                      \
2412void helper_##op(CPUPPCState *env, uint32_t opcode)                      \
2413{                                                                        \
2414    ppc_vsr_t xa, xb;                                                    \
2415    uint32_t cc = 0;                                                     \
2416    bool vxsnan_flag = false, vxvc_flag = false;                         \
2417                                                                         \
2418    helper_reset_fpstatus(env);                                          \
2419    getVSR(xA(opcode), &xa, env);                                        \
2420    getVSR(xB(opcode), &xb, env);                                        \
2421                                                                         \
2422    if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||         \
2423        float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {         \
2424        vxsnan_flag = true;                                              \
2425        cc = CRF_SO;                                                     \
2426        if (fpscr_ve == 0 && ordered) {                                  \
2427            vxvc_flag = true;                                            \
2428        }                                                                \
2429    } else if (float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) ||      \
2430               float64_is_quiet_nan(xb.VsrD(0), &env->fp_status)) {      \
2431        cc = CRF_SO;                                                     \
2432        if (ordered) {                                                   \
2433            vxvc_flag = true;                                            \
2434        }                                                                \
2435    }                                                                    \
2436    if (vxsnan_flag) {                                                   \
2437        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
2438    }                                                                    \
2439    if (vxvc_flag) {                                                     \
2440        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);             \
2441    }                                                                    \
2442                                                                         \
2443    if (float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {           \
2444        cc |= CRF_LT;                                                    \
2445    } else if (!float64_le(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {   \
2446        cc |= CRF_GT;                                                    \
2447    } else {                                                             \
2448        cc |= CRF_EQ;                                                    \
2449    }                                                                    \
2450                                                                         \
2451    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                 \
2452    env->fpscr |= cc << FPSCR_FPRF;                                      \
2453    env->crf[BF(opcode)] = cc;                                           \
2454                                                                         \
2455    float_check_status(env);                                             \
2456}
2457
2458VSX_SCALAR_CMP(xscmpodp, 1)
2459VSX_SCALAR_CMP(xscmpudp, 0)
2460
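    /* VSX_SCALAR_CMPQ - VSX scalar quad precision floating point compare
     *   op      - instruction mnemonic
     *   ordered - 1 for the ordered compare: a NaN operand then also raises
     *             VXVC (for an SNaN, only when FPSCR[VE] is clear)
     */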
2461#define VSX_SCALAR_CMPQ(op, ordered)                                    \
2462void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
2463{                                                                       \
2464    ppc_vsr_t xa, xb;                                                   \
2465    uint32_t cc = 0;                                                    \
2466    bool vxsnan_flag = false, vxvc_flag = false;                        \
2467                                                                        \
2468    helper_reset_fpstatus(env);                                         \
2469    getVSR(rA(opcode) + 32, &xa, env);                                  \
2470    getVSR(rB(opcode) + 32, &xb, env);                                  \
2471                                                                        \
2472    if (float128_is_signaling_nan(xa.f128, &env->fp_status) ||          \
2473        float128_is_signaling_nan(xb.f128, &env->fp_status)) {          \
2474        vxsnan_flag = true;                                             \
2475        cc = CRF_SO;                                                    \
2476        if (fpscr_ve == 0 && ordered) {                                 \
2477            vxvc_flag = true;                                           \
2478        }                                                               \
2479    } else if (float128_is_quiet_nan(xa.f128, &env->fp_status) ||       \
2480               float128_is_quiet_nan(xb.f128, &env->fp_status)) {       \
2481        cc = CRF_SO;                                                    \
2482        if (ordered) {                                                  \
2483            vxvc_flag = true;                                           \
2484        }                                                               \
2485    }                                                                   \
2486    if (vxsnan_flag) {                                                  \
2487        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);          \
2488    }                                                                   \
2489    if (vxvc_flag) {                                                    \
2490        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);            \
2491    }                                                                   \
2492                                                                        \
2493    if (float128_lt(xa.f128, xb.f128, &env->fp_status)) {               \
2494        cc |= CRF_LT;                                                   \
2495    } else if (!float128_le(xa.f128, xb.f128, &env->fp_status)) {       \
2496        cc |= CRF_GT;                                                   \
2497    } else {                                                            \
2498        cc |= CRF_EQ;                                                   \
2499    }                                                                   \
2500                                                                        \
2501    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                \
2502    env->fpscr |= cc << FPSCR_FPRF;                                     \
2503    env->crf[BF(opcode)] = cc;                                          \
2504                                                                        \
2505    float_check_status(env);                                            \
2506}
2507
2508VSX_SCALAR_CMPQ(xscmpoqp, 1)
2509VSX_SCALAR_CMPQ(xscmpuqp, 0)
2510
2511/* VSX_MAX_MIN - VSX floating point maximum/minimum
2512 *   name  - instruction mnemonic
2513 *   op    - operation (max or min)
2514 *   nels  - number of elements (1, 2 or 4)
2515 *   tp    - type (float32 or float64)
2516 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2517 */
2518#define VSX_MAX_MIN(name, op, nels, tp, fld)                                  \
2519void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
2520{                                                                             \
2521    ppc_vsr_t xt, xa, xb;                                                     \
2522    int i;                                                                    \
2523                                                                              \
2524    getVSR(xA(opcode), &xa, env);                                             \
2525    getVSR(xB(opcode), &xb, env);                                             \
2526    getVSR(xT(opcode), &xt, env);                                             \
2527                                                                              \
2528    for (i = 0; i < nels; i++) {                                              \
2529        xt.fld = tp##_##op(xa.fld, xb.fld, &env->fp_status);                  \
2530        if (unlikely(tp##_is_signaling_nan(xa.fld, &env->fp_status) ||        \
2531                     tp##_is_signaling_nan(xb.fld, &env->fp_status))) {       \
2532            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);            \
2533        }                                                                     \
2534    }                                                                         \
2535                                                                              \
2536    putVSR(xT(opcode), &xt, env);                                             \
2537    float_check_status(env);                                                  \
2538}
2539
2540VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
2541VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
2542VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
2543VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
2544VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
2545VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
2546
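    /* VSX_MAX_MINC - VSX scalar maximum/minimum type-C (xsmaxcdp/xsmincdp)
     *   name - instruction mnemonic
     *   max  - 1 selects the maximum, 0 the minimum; a NaN in either
     *          operand makes the result operand B
     */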
2547#define VSX_MAX_MINC(name, max)                                               \
2548void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
2549{                                                                             \
2550    ppc_vsr_t xt, xa, xb;                                                     \
2551    bool vxsnan_flag = false, vex_flag = false;                               \
2552                                                                              \
2553    getVSR(rA(opcode) + 32, &xa, env);                                        \
2554    getVSR(rB(opcode) + 32, &xb, env);                                        \
2555    getVSR(rD(opcode) + 32, &xt, env);                                        \
2556                                                                              \
2557    if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||                            \
2558                 float64_is_any_nan(xb.VsrD(0)))) {                           \
2559        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||          \
2560            float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {          \
2561            vxsnan_flag = true;                                               \
2562        }                                                                     \
2563        xt.VsrD(0) = xb.VsrD(0);                                              \
2564    } else if ((max &&                                                        \
2565               !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) ||       \
2566               (!max &&                                                       \
2567               float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) {        \
2568        xt.VsrD(0) = xa.VsrD(0);                                              \
2569    } else {                                                                  \
2570        xt.VsrD(0) = xb.VsrD(0);                                              \
2571    }                                                                         \
2572                                                                              \
2573    vex_flag = fpscr_ve & vxsnan_flag;                                        \
2574    if (vxsnan_flag) {                                                        \
2575        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);                \
2576    }                                                                         \
2577    if (!vex_flag) {                                                          \
2578        putVSR(rD(opcode) + 32, &xt, env);                                    \
2579    }                                                                         \
2580}
2581
2582VSX_MAX_MINC(xsmaxcdp, 1)
2583VSX_MAX_MINC(xsmincdp, 0)
2584
2585#define VSX_MAX_MINJ(name, max)                                               \
2586void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
2587{                                                                             \
2588    ppc_vsr_t xt, xa, xb;                                                     \
2589    bool vxsnan_flag = false, vex_flag = false;                               \
2590                                                                              \
2591    getVSR(rA(opcode) + 32, &xa, env);                                        \
2592    getVSR(rB(opcode) + 32, &xb, env);                                        \
2593    getVSR(rD(opcode) + 32, &xt, env);                                        \
2594                                                                              \
2595    if (unlikely(float64_is_any_nan(xa.VsrD(0)))) {                           \
2596        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status)) {          \
2597            vxsnan_flag = true;                                               \
2598        }                                                                     \
2599        xt.VsrD(0) = xa.VsrD(0);                                              \
2600    } else if (unlikely(float64_is_any_nan(xb.VsrD(0)))) {                    \
2601        if (float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {          \
2602            vxsnan_flag = true;                                               \
2603        }                                                                     \
2604        xt.VsrD(0) = xb.VsrD(0);                                              \
2605    } else if (float64_is_zero(xa.VsrD(0)) && float64_is_zero(xb.VsrD(0))) {  \
2606        if (max) {                                                            \
2607            if (!float64_is_neg(xa.VsrD(0)) || !float64_is_neg(xb.VsrD(0))) { \
2608                xt.VsrD(0) = 0ULL;                                            \
2609            } else {                                                          \
2610                xt.VsrD(0) = 0x8000000000000000ULL;                           \
2611            }                                                                 \
2612        } else {                                                              \
2613            if (float64_is_neg(xa.VsrD(0)) || float64_is_neg(xb.VsrD(0))) {   \
2614                xt.VsrD(0) = 0x8000000000000000ULL;                           \
2615            } else {                                                          \
2616                xt.VsrD(0) = 0ULL;                                            \
2617            }                                                                 \
2618        }                                                                     \
2619    } else if ((max &&                                                        \
2620               !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) ||       \
2621               (!max &&                                                       \
2622               float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) {        \
2623        xt.VsrD(0) = xa.VsrD(0);                                              \
2624    } else {                                                                  \
2625        xt.VsrD(0) = xb.VsrD(0);                                              \
2626    }                                                                         \
2627                                                                              \
2628    vex_flag = fpscr_ve & vxsnan_flag;                                        \
2629    if (vxsnan_flag) {                                                        \
2630        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);                \
2631    }                                                                         \
2632    if (!vex_flag) {                                                          \
2633        putVSR(rD(opcode) + 32, &xt, env);                                    \
2634    }                                                                         \
2635}
2636
2637VSX_MAX_MINJ(xsmaxjdp, 1)
2638VSX_MAX_MINJ(xsminjdp, 0)
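
/* Worked example of the signed-zero handling above: with
 * xa.VsrD(0) = 0x0000000000000000 (+0.0) and xb.VsrD(0) = 0x8000000000000000
 * (-0.0),
 *
 *     xsmaxjdp  ->  xt.VsrD(0) = 0x0000000000000000   (+0.0)
 *     xsminjdp  ->  xt.VsrD(0) = 0x8000000000000000   (-0.0)
 *
 * The explicit zero test is what makes the result independent of operand
 * order; float64_lt() treats +0.0 and -0.0 as equal, so the fallback
 * comparison would return xa for the max form and xb for the min form
 * regardless of which zero carries the sign.
 */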
2639
2640/* VSX_CMP - VSX floating point compare
2641 *   op    - instruction mnemonic
2642 *   nels  - number of elements (1, 2 or 4)
2643 *   tp    - type (float32 or float64)
2644 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2645 *   cmp   - comparison operation
2646 *   svxvc - set VXVC bit
2647 *   exp   - expected result of comparison
2648 */
2649#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp)                       \
2650void helper_##op(CPUPPCState *env, uint32_t opcode)                       \
2651{                                                                         \
2652    ppc_vsr_t xt, xa, xb;                                                 \
2653    int i;                                                                \
2654    int all_true = 1;                                                     \
2655    int all_false = 1;                                                    \
2656                                                                          \
2657    getVSR(xA(opcode), &xa, env);                                         \
2658    getVSR(xB(opcode), &xb, env);                                         \
2659    getVSR(xT(opcode), &xt, env);                                         \
2660                                                                          \
2661    for (i = 0; i < nels; i++) {                                          \
2662        if (unlikely(tp##_is_any_nan(xa.fld) ||                           \
2663                     tp##_is_any_nan(xb.fld))) {                          \
2664            if (tp##_is_signaling_nan(xa.fld, &env->fp_status) ||         \
2665                tp##_is_signaling_nan(xb.fld, &env->fp_status)) {         \
2666                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);    \
2667            }                                                             \
2668            if (svxvc) {                                                  \
2669                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);      \
2670            }                                                             \
2671            xt.fld = 0;                                                   \
2672            all_true = 0;                                                 \
2673        } else {                                                          \
2674            if (tp##_##cmp(xb.fld, xa.fld, &env->fp_status) == exp) {     \
2675                xt.fld = -1;                                              \
2676                all_false = 0;                                            \
2677            } else {                                                      \
2678                xt.fld = 0;                                               \
2679                all_true = 0;                                             \
2680            }                                                             \
2681        }                                                                 \
2682    }                                                                     \
2683                                                                          \
2684    putVSR(xT(opcode), &xt, env);                                         \
2685    if ((opcode >> (31-21)) & 1) {                                        \
2686        env->crf[6] = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);       \
2687    }                                                                     \
2688    float_check_status(env);                                              \
2689}
2690
2691VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
2692VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
2693VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
2694VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
2695VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
2696VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
2697VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
2698VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
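
/* Note that VSX_CMP evaluates cmp(xb, xa), so the "greater than / greater
 * than or equal" mnemonics are built from the softfloat lt/le primitives with
 * the operands swapped, and the "not equal" forms reuse eq with an expected
 * result of 0.  One lane of xvcmpgedp therefore behaves as (sketch; the
 * unordered case is handled by the NaN branch above):
 *
 *     if (float64_le(xb.VsrD(i), xa.VsrD(i), &env->fp_status)) {
 *         xt.VsrD(i) = -1;      // xa >= xb: lane set to all ones
 *     } else {
 *         xt.VsrD(i) = 0;       // xa < xb
 *     }
 */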
2699
2700/* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
2701 *   op    - instruction mnemonic
2702 *   nels  - number of elements (1, 2 or 4)
2703 *   stp   - source type (float32 or float64)
2704 *   ttp   - target type (float32 or float64)
2705 *   sfld  - source vsr_t field
2706 *   tfld  - target vsr_t field (VsrD(*) or VsrW(*))
2707 *   sfprf - set FPRF
2708 */
2709#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf)    \
2710void helper_##op(CPUPPCState *env, uint32_t opcode)                \
2711{                                                                  \
2712    ppc_vsr_t xt, xb;                                              \
2713    int i;                                                         \
2714                                                                   \
2715    getVSR(xB(opcode), &xb, env);                                  \
2716    getVSR(xT(opcode), &xt, env);                                  \
2717                                                                   \
2718    for (i = 0; i < nels; i++) {                                   \
2719        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);        \
2720        if (unlikely(stp##_is_signaling_nan(xb.sfld,               \
2721                                            &env->fp_status))) {   \
2722            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
2723            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                 \
2724        }                                                          \
2725        if (sfprf) {                                               \
2726            helper_compute_fprf_##ttp(env, xt.tfld);               \
2727        }                                                          \
2728    }                                                              \
2729                                                                   \
2730    putVSR(xT(opcode), &xt, env);                                  \
2731    float_check_status(env);                                       \
2732}
2733
2734VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
2735VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
2736VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2*i), 0)
2737VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2*i), VsrD(i), 0)
2738
2739/* VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
2740 *   op    - instruction mnemonic
2741 *   nels  - number of elements (1, 2 or 4)
2742 *   stp   - source type (float32 or float64)
2743 *   ttp   - target type (float32, float64 or float128)
2744 *   sfld  - source vsr_t field
2745 *   tfld  - target vsr_t field (VsrD(*), VsrW(*) or f128)
2746 *   sfprf - set FPRF
2747 */
2748#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf)    \
2749void helper_##op(CPUPPCState *env, uint32_t opcode)                       \
2750{                                                                       \
2751    ppc_vsr_t xt, xb;                                                   \
2752    int i;                                                              \
2753                                                                        \
2754    getVSR(rB(opcode) + 32, &xb, env);                                  \
2755    getVSR(rD(opcode) + 32, &xt, env);                                  \
2756                                                                        \
2757    for (i = 0; i < nels; i++) {                                        \
2758        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);             \
2759        if (unlikely(stp##_is_signaling_nan(xb.sfld,                    \
2760                                            &env->fp_status))) {        \
2761            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);      \
2762            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                      \
2763        }                                                               \
2764        if (sfprf) {                                                    \
2765            helper_compute_fprf_##ttp(env, xt.tfld);                    \
2766        }                                                               \
2767    }                                                                   \
2768                                                                        \
2769    putVSR(rD(opcode) + 32, &xt, env);                                  \
2770    float_check_status(env);                                            \
2771}
2772
2773VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
2774
2775/* VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
2776 *                       involving one half precision value
2777 *   op    - instruction mnemonic
2778 *   nels  - number of elements (1, 2 or 4)
2779 *   stp   - source type
2780 *   ttp   - target type
2781 *   sfld  - source vsr_t field
2782 *   tfld  - target vsr_t field
2783 *   sfprf - set FPRF
2784 */
2785#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf) \
2786void helper_##op(CPUPPCState *env, uint32_t opcode)                \
2787{                                                                  \
2788    ppc_vsr_t xt, xb;                                              \
2789    int i;                                                         \
2790                                                                   \
2791    getVSR(xB(opcode), &xb, env);                                  \
2792    memset(&xt, 0, sizeof(xt));                                    \
2793                                                                   \
2794    for (i = 0; i < nels; i++) {                                   \
2795        xt.tfld = stp##_to_##ttp(xb.sfld, 1, &env->fp_status);     \
2796        if (unlikely(stp##_is_signaling_nan(xb.sfld,               \
2797                                            &env->fp_status))) {   \
2798            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
2799            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                 \
2800        }                                                          \
2801        if (sfprf) {                                               \
2802            helper_compute_fprf_##ttp(env, xt.tfld);               \
2803        }                                                          \
2804    }                                                              \
2805                                                                   \
2806    putVSR(xT(opcode), &xt, env);                                  \
2807    float_check_status(env);                                       \
2808}
2809
2810VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
2811VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
2812VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0)
2813VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
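
/* The extra "1" argument passed to stp##_to_##ttp in VSX_CVT_FP_TO_FP_HP is
 * the "ieee" flag taken by this softfloat version's half-precision conversion
 * routines, selecting IEEE 754 half-precision rather than the ARM alternative
 * format.  The xscvdphp case, for example, resolves to a call of the form
 * (sketch):
 *
 *     xt.VsrH(3) = float64_to_float16(xb.VsrD(0), 1, &env->fp_status);
 */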
2814
2815/*
2816 * xscvqpdp isn't implemented via VSX_CVT_FP_TO_FP() because the helper also
2817 * covers xscvqpdpo, whose round-to-odd mode is selected by Rc(opcode) below.
2818 */
2819void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode)
2820{
2821    ppc_vsr_t xt, xb;
2822    float_status tstat;
2823
2824    getVSR(rB(opcode) + 32, &xb, env);
2825    memset(&xt, 0, sizeof(xt));
2826
2827    tstat = env->fp_status;
2828    if (unlikely(Rc(opcode) != 0)) {
2829        tstat.float_rounding_mode = float_round_to_odd;
2830    }
2831
2832    xt.VsrD(0) = float128_to_float64(xb.f128, &tstat);
2833    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
2834    if (unlikely(float128_is_signaling_nan(xb.f128,
2835                                           &tstat))) {
2836        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
2837        xt.VsrD(0) = float64_snan_to_qnan(xt.VsrD(0));
2838    }
2839    helper_compute_fprf_float64(env, xt.VsrD(0));
2840
2841    putVSR(rD(opcode) + 32, &xt, env);
2842    float_check_status(env);
2843}
2844
2845uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
2846{
2847    float_status tstat = env->fp_status;
2848    set_float_exception_flags(0, &tstat);
2849
2850    return (uint64_t)float64_to_float32(xb, &tstat) << 32;
2851}
2852
2853uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
2854{
2855    float_status tstat = env->fp_status;
2856    set_float_exception_flags(0, &tstat);
2857
2858    return float32_to_float64(xb >> 32, &tstat);
2859}
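
/* xscvdpspn and xscvspdpn are the "non-signalling" conversions: they operate
 * on a local copy of fp_status with the exception flags cleared and never
 * write the flags back, so no FPSCR status bits can be set.  The
 * single-precision result of helper_xscvdpspn is returned in the
 * most-significant 32 bits of the doubleword, e.g. (sketch):
 *
 *     uint64_t w = helper_xscvdpspn(env, xb);   // float32 image in the high word
 */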
2860
2861/* VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
2862 *   op    - instruction mnemonic
2863 *   nels  - number of elements (1, 2 or 4)
2864 *   stp   - source type (float32 or float64)
2865 *   ttp   - target type (int32, uint32, int64 or uint64)
2866 *   sfld  - source vsr_t field
2867 *   tfld  - target vsr_t field
2868 *   rnan  - resulting NaN
2869 */
2870#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan)              \
2871void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
2872{                                                                            \
2873    ppc_vsr_t xt, xb;                                                        \
2874    int i;                                                                   \
2875                                                                             \
2876    getVSR(xB(opcode), &xb, env);                                            \
2877    getVSR(xT(opcode), &xt, env);                                            \
2878                                                                             \
2879    for (i = 0; i < nels; i++) {                                             \
2880        if (unlikely(stp##_is_any_nan(xb.sfld))) {                           \
2881            if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) {          \
2882                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);       \
2883            }                                                                \
2884            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);            \
2885            xt.tfld = rnan;                                                  \
2886        } else {                                                             \
2887            xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld,                \
2888                          &env->fp_status);                                  \
2889            if (env->fp_status.float_exception_flags & float_flag_invalid) { \
2890                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);        \
2891            }                                                                \
2892        }                                                                    \
2893    }                                                                        \
2894                                                                             \
2895    putVSR(xT(opcode), &xt, env);                                            \
2896    float_check_status(env);                                                 \
2897}
2898
2899VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
2900                  0x8000000000000000ULL)
2901VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
2902                  0x80000000U)
2903VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
2904VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
2905VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
2906                  0x8000000000000000ULL)
2907VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2*i), \
2908                  0x80000000U)
2909VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
2910VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2*i), 0U)
2911VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2*i), VsrD(i), \
2912                  0x8000000000000000ULL)
2913VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
2914VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2*i), VsrD(i), 0ULL)
2915VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
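
/* For illustration, the scalar xscvdpsxws instantiation resolves the
 * conversion above to (sketch; the NaN case has already been filtered out):
 *
 *     xt.VsrW(1) = float64_to_int32_round_to_zero(xb.VsrD(0), &env->fp_status);
 *
 * A NaN source instead takes the rnan path, producing 0x80000000 and raising
 * VXCVI (plus VXSNAN for a signalling NaN).
 */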
2916
2917/* VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
2918 *   op    - instruction mnemonic
2919 *   stp   - source type (float32, float64 or float128)
2920 *   ttp   - target type (int32, uint32, int64 or uint64)
2921 *   sfld  - source vsr_t field
2922 *   tfld  - target vsr_t field
2923 *   rnan  - resulting NaN
2924 */
2925#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan)             \
2926void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
2927{                                                                            \
2928    ppc_vsr_t xt, xb;                                                        \
2929                                                                             \
2930    getVSR(rB(opcode) + 32, &xb, env);                                       \
2931    memset(&xt, 0, sizeof(xt));                                              \
2932                                                                             \
2933    if (unlikely(stp##_is_any_nan(xb.sfld))) {                               \
2934        if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) {              \
2935            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
2936        }                                                                    \
2937        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);                \
2938        xt.tfld = rnan;                                                      \
2939    } else {                                                                 \
2940        xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld,                    \
2941                      &env->fp_status);                                      \
2942        if (env->fp_status.float_exception_flags & float_flag_invalid) {     \
2943            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);            \
2944        }                                                                    \
2945    }                                                                        \
2946                                                                             \
2947    putVSR(rD(opcode) + 32, &xt, env);                                       \
2948    float_check_status(env);                                                 \
2949}
2950
2951VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0),          \
2952                  0x8000000000000000ULL)
2953
2954VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0),          \
2955                  0xffffffff80000000ULL)
2956VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
2957VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
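
/* The word-sized quad-precision conversions above still target VsrD(0), so
 * the signed word result of xscvqpswz is sign-extended into the full
 * doubleword (and the unsigned word result of xscvqpuwz zero-extended); the
 * xscvqpswz NaN default is likewise the sign-extended 0xffffffff80000000ULL.
 * The doubleword form xscvqpsdz resolves to (sketch):
 *
 *     xt.VsrD(0) = float128_to_int64_round_to_zero(xb.f128, &env->fp_status);
 */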
2958
2959/* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
2960 *   op    - instruction mnemonic
2961 *   nels  - number of elements (1, 2 or 4)
2962 *   stp   - source type (int32, uint32, int64 or uint64)
2963 *   ttp   - target type (float32 or float64)
2964 *   sfld  - source vsr_t field
2965 *   tfld  - target vsr_t field
2966 *   sfprf - set FPRF
2967 *   r2sp  - round the result to single precision
2968 */
2969#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp)  \
2970void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
2971{                                                                       \
2972    ppc_vsr_t xt, xb;                                                   \
2973    int i;                                                              \
2974                                                                        \
2975    getVSR(xB(opcode), &xb, env);                                       \
2976    getVSR(xT(opcode), &xt, env);                                       \
2977                                                                        \
2978    for (i = 0; i < nels; i++) {                                        \
2979        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);             \
2980        if (r2sp) {                                                     \
2981            xt.tfld = helper_frsp(env, xt.tfld);                        \
2982        }                                                               \
2983        if (sfprf) {                                                    \
2984            helper_compute_fprf_float64(env, xt.tfld);                  \
2985        }                                                               \
2986    }                                                                   \
2987                                                                        \
2988    putVSR(xT(opcode), &xt, env);                                       \
2989    float_check_status(env);                                            \
2990}
2991
2992VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
2993VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
2994VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
2995VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
2996VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
2997VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
2998VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2*i), VsrD(i), 0, 0)
2999VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2*i), VsrD(i), 0, 0)
3000VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2*i), 0, 0)
3001VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2*i), 0, 0)
3002VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
3003VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
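
/* When r2sp is set (the scalar single-precision forms xscvsxdsp/xscvuxdsp),
 * the converted value is additionally rounded to single precision while
 * remaining in float64 format, so xscvsxdsp behaves roughly as:
 *
 *     xt.VsrD(0) = int64_to_float64(xb.VsrD(0), &env->fp_status);
 *     xt.VsrD(0) = helper_frsp(env, xt.VsrD(0));
 *     helper_compute_fprf_float64(env, xt.VsrD(0));
 */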
3004
3005/* VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
3006 *   op    - instruction mnemonic
3007 *   stp   - source type (int32, uint32, int64 or uint64)
3008 *   ttp   - target type (float32 or float64)
3009 *   sfld  - source vsr_t field
3010 *   tfld  - target vsr_t field
3011 */
3012#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld)              \
3013void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
3014{                                                                       \
3015    ppc_vsr_t xt, xb;                                                   \
3016                                                                        \
3017    getVSR(rB(opcode) + 32, &xb, env);                                  \
3018    getVSR(rD(opcode) + 32, &xt, env);                                  \
3019                                                                        \
3020    xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);                 \
3021    helper_compute_fprf_##ttp(env, xt.tfld);                            \
3022                                                                        \
3023    putVSR(rD(opcode) + 32, &xt, env);                                  \
3024    float_check_status(env);                                            \
3025}
3026
3027VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
3028VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
3029
3030/* For "use current rounding mode", define a value that will not be one of
3031 * the existing rounding model enums.
3032 */
3033#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
3034  float_round_up + float_round_to_zero)
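
/* The four basic rounding modes have small, distinct enum values (0..3 in
 * this softfloat version), so their sum does not collide with any defined
 * rounding mode, including float_round_ties_away and float_round_to_odd;
 * FLOAT_ROUND_CURRENT is therefore a safe "leave the rounding mode alone"
 * sentinel that VSX_ROUND checks before touching fp_status.
 */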
3035
3036/* VSX_ROUND - VSX floating point round
3037 *   op    - instruction mnemonic
3038 *   nels  - number of elements (1, 2 or 4)
3039 *   tp    - type (float32 or float64)
3040 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
3041 *   rmode - rounding mode
3042 *   sfprf - set FPRF
3043 */
3044#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf)                     \
3045void helper_##op(CPUPPCState *env, uint32_t opcode)                    \
3046{                                                                      \
3047    ppc_vsr_t xt, xb;                                                  \
3048    int i;                                                             \
3049    getVSR(xB(opcode), &xb, env);                                      \
3050    getVSR(xT(opcode), &xt, env);                                      \
3051                                                                       \
3052    if (rmode != FLOAT_ROUND_CURRENT) {                                \
3053        set_float_rounding_mode(rmode, &env->fp_status);               \
3054    }                                                                  \
3055                                                                       \
3056    for (i = 0; i < nels; i++) {                                       \
3057        if (unlikely(tp##_is_signaling_nan(xb.fld,                     \
3058                                           &env->fp_status))) {        \
3059            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);     \
3060            xt.fld = tp##_snan_to_qnan(xb.fld);                        \
3061        } else {                                                       \
3062            xt.fld = tp##_round_to_int(xb.fld, &env->fp_status);       \
3063        }                                                              \
3064        if (sfprf) {                                                   \
3065            helper_compute_fprf_float64(env, xt.fld);                  \
3066        }                                                              \
3067    }                                                                  \
3068                                                                       \
3069    /* If this is not a "use current rounding mode" instruction,       \
3070     * then inhibit setting of the XX bit and restore rounding         \
3071     * mode from FPSCR */                                              \
3072    if (rmode != FLOAT_ROUND_CURRENT) {                                \
3073        fpscr_set_rounding_mode(env);                                  \
3074        env->fp_status.float_exception_flags &= ~float_flag_inexact;   \
3075    }                                                                  \
3076                                                                       \
3077    putVSR(xT(opcode), &xt, env);                                      \
3078    float_check_status(env);                                           \
3079}
3080
3081VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
3082VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
3083VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
3084VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
3085VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)
3086
3087VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
3088VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
3089VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
3090VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
3091VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)
3092
3093VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
3094VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
3095VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
3096VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
3097VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
3098
3099uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
3100{
3101    helper_reset_fpstatus(env);
3102
3103    uint64_t xt = helper_frsp(env, xb);
3104
3105    helper_compute_fprf_float64(env, xt);
3106    float_check_status(env);
3107    return xt;
3108}
3109
3110#define VSX_XXPERM(op, indexed)                                       \
3111void helper_##op(CPUPPCState *env, uint32_t opcode)                   \
3112{                                                                     \
3113    ppc_vsr_t xt, xa, pcv, xto;                                       \
3114    int i, idx;                                                       \
3115                                                                      \
3116    getVSR(xA(opcode), &xa, env);                                     \
3117    getVSR(xT(opcode), &xt, env);                                     \
3118    getVSR(xB(opcode), &pcv, env);                                    \
3119                                                                      \
3120    for (i = 0; i < 16; i++) {                                        \
3121        idx = pcv.VsrB(i) & 0x1F;                                     \
3122        if (indexed) {                                                \
3123            idx = 31 - idx;                                           \
3124        }                                                             \
3125        xto.VsrB(i) = (idx <= 15) ? xa.VsrB(idx) : xt.VsrB(idx - 16); \
3126    }                                                                 \
3127    putVSR(xT(opcode), &xto, env);                                    \
3128}
3129
3130VSX_XXPERM(xxperm, 0)
3131VSX_XXPERM(xxpermr, 1)
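
/* Worked example of the permute-control decoding above: for xxperm, a control
 * byte of 0x05 selects byte 5 of xA and a control byte of 0x13 (= 19) selects
 * byte 19 - 16 = 3 of xT.  For xxpermr the index is mirrored first, so the
 * same 0x05 becomes 31 - 5 = 26 and selects byte 26 - 16 = 10 of xT instead.
 */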
3132
3133void helper_xvxsigsp(CPUPPCState *env, uint32_t opcode)
3134{
3135    ppc_vsr_t xt, xb;
3136    uint32_t exp, i, fraction;
3137
3138    getVSR(xB(opcode), &xb, env);
3139    memset(&xt, 0, sizeof(xt));
3140
3141    for (i = 0; i < 4; i++) {
3142        exp = (xb.VsrW(i) >> 23) & 0xFF;
3143        fraction = xb.VsrW(i) & 0x7FFFFF;
3144        if (exp != 0 && exp != 255) {
3145            xt.VsrW(i) = fraction | 0x00800000;
3146        } else {
3147            xt.VsrW(i) = fraction;
3148        }
3149    }
3150    putVSR(xT(opcode), &xt, env);
3151}
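
/* Example: for the word 0x3F800000 (1.0f) the biased exponent is 0x7F, which
 * is neither 0 nor 255, so the implicit integer bit is made explicit and the
 * extracted significand is
 *
 *     xt.VsrW(i) = 0x00000000 | 0x00800000 = 0x00800000
 *
 * while zeroes, denormals, infinities and NaNs (exponent 0 or 255) keep only
 * their raw fraction bits.
 */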
3152
3153/* VSX_TEST_DC - VSX floating point test data class
3154 *   op    - instruction mnemonic
3155 *   nels  - number of elements (1, 2 or 4)
3156 *   xbn   - VSR register number
3157 *   tp    - type (float32, float64 or float128)
3158 *   fld   - vsr_t field (VsrD(*), VsrW(*) or f128)
3159 *   tfld  - target vsr_t field (VsrD(*) or VsrW(*))
3160 *   fld_max - target field max
3161 *   scrf  - set result in CR and FPCC
3162 */
3163#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf)  \
3164void helper_##op(CPUPPCState *env, uint32_t opcode)         \
3165{                                                           \
3166    ppc_vsr_t xt, xb;                                       \
3167    uint32_t i, sign, dcmx;                                 \
3168    uint32_t cc, match = 0;                                 \
3169                                                            \
3170    getVSR(xbn, &xb, env);                                  \
3171    if (!scrf) {                                            \
3172        memset(&xt, 0, sizeof(xt));                         \
3173        dcmx = DCMX_XV(opcode);                             \
3174    } else {                                                \
3175        dcmx = DCMX(opcode);                                \
3176    }                                                       \
3177                                                            \
3178    for (i = 0; i < nels; i++) {                            \
3179        sign = tp##_is_neg(xb.fld);                         \
3180        if (tp##_is_any_nan(xb.fld)) {                      \
3181            match = extract32(dcmx, 6, 1);                  \
3182        } else if (tp##_is_infinity(xb.fld)) {              \
3183            match = extract32(dcmx, 4 + !sign, 1);          \
3184        } else if (tp##_is_zero(xb.fld)) {                  \
3185            match = extract32(dcmx, 2 + !sign, 1);          \
3186        } else if (tp##_is_zero_or_denormal(xb.fld)) {      \
3187            match = extract32(dcmx, 0 + !sign, 1);          \
3188        }                                                   \
3189                                                            \
3190        if (scrf) {                                         \
3191            cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT;  \
3192            env->fpscr &= ~(0x0F << FPSCR_FPRF);            \
3193            env->fpscr |= cc << FPSCR_FPRF;                 \
3194            env->crf[BF(opcode)] = cc;                      \
3195        } else {                                            \
3196            xt.tfld = match ? fld_max : 0;                  \
3197        }                                                   \
3198        match = 0;                                          \
3199    }                                                       \
3200    if (!scrf) {                                            \
3201        putVSR(xT(opcode), &xt, env);                       \
3202    }                                                       \
3203}
3204
3205VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0)
3206VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
3207VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
3208VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)
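
/* The DCMX bits tested by VSX_TEST_DC (and by helper_xststdcsp below) select
 * the data classes to match, with the sign of the operand choosing between
 * the positive and negative variant:
 *
 *     bit 6: NaN
 *     bit 5: +Infinity    bit 4: -Infinity
 *     bit 3: +Zero        bit 2: -Zero
 *     bit 1: +Denormal    bit 0: -Denormal
 */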
3209
3210void helper_xststdcsp(CPUPPCState *env, uint32_t opcode)
3211{
3212    ppc_vsr_t xb;
3213    uint32_t dcmx, sign, exp;
3214    uint32_t cc, match = 0, not_sp = 0;
3215
3216    getVSR(xB(opcode), &xb, env);
3217    dcmx = DCMX(opcode);
3218    exp = (xb.VsrD(0) >> 52) & 0x7FF;
3219
3220    sign = float64_is_neg(xb.VsrD(0));
3221    if (float64_is_any_nan(xb.VsrD(0))) {
3222        match = extract32(dcmx, 6, 1);
3223    } else if (float64_is_infinity(xb.VsrD(0))) {
3224        match = extract32(dcmx, 4 + !sign, 1);
3225    } else if (float64_is_zero(xb.VsrD(0))) {
3226        match = extract32(dcmx, 2 + !sign, 1);
3227    } else if (float64_is_zero_or_denormal(xb.VsrD(0)) ||
3228               (exp > 0 && exp < 0x381)) {
3229        match = extract32(dcmx, 0 + !sign, 1);
3230    }
3231
3232    not_sp = !float64_eq(xb.VsrD(0),
3233                         float32_to_float64(
3234                             float64_to_float32(xb.VsrD(0), &env->fp_status),
3235                             &env->fp_status), &env->fp_status);
3236
3237    cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
3238    env->fpscr &= ~(0x0F << FPSCR_FPRF);
3239    env->fpscr |= cc << FPSCR_FPRF;
3240    env->crf[BF(opcode)] = cc;
3241}
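
/* helper_xststdcsp differs from the generic VSX_TEST_DC cases because its
 * operand is held in double format: a biased double-precision exponent in the
 * range 0x001..0x380 lies below the smallest normalised single-precision
 * magnitude (the exponent bias difference is 1023 - 127 = 896 = 0x380), so
 * such values are classified as denormal, and the CR "SO" position reports
 * that the value is not exactly representable in single precision (i.e. the
 * float64 -> float32 -> float64 round trip above does not return the original
 * value).
 */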
3242
3243void helper_xsrqpi(CPUPPCState *env, uint32_t opcode)
3244{
3245    ppc_vsr_t xb;
3246    ppc_vsr_t xt;
3247    uint8_t r = Rrm(opcode);
3248    uint8_t ex = Rc(opcode);
3249    uint8_t rmc = RMC(opcode);
3250    uint8_t rmode = 0;
3251    float_status tstat;
3252
3253    getVSR(rB(opcode) + 32, &xb, env);
3254    memset(&xt, 0, sizeof(xt));
3255    helper_reset_fpstatus(env);
3256
3257    if (r == 0 && rmc == 0) {
3258        rmode = float_round_ties_away;
3259    } else if (r == 0 && rmc == 0x3) {
3260        rmode = fpscr_rn;
3261    } else if (r == 1) {
3262        switch (rmc) {
3263        case 0:
3264            rmode = float_round_nearest_even;
3265            break;
3266        case 1:
3267            rmode = float_round_to_zero;
3268            break;
3269        case 2:
3270            rmode = float_round_up;
3271            break;
3272        case 3:
3273            rmode = float_round_down;
3274            break;
3275        default:
3276            abort();
3277        }
3278    }
3279
3280    tstat = env->fp_status;
3281    set_float_exception_flags(0, &tstat);
3282    set_float_rounding_mode(rmode, &tstat);
3283    xt.f128 = float128_round_to_int(xb.f128, &tstat);
3284    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3285
3286    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3287        if (float128_is_signaling_nan(xb.f128, &tstat)) {
3288            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
3289            xt.f128 = float128_snan_to_qnan(xt.f128);
3290        }
3291    }
3292
3293    if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
3294        env->fp_status.float_exception_flags &= ~float_flag_inexact;
3295    }
3296
3297    helper_compute_fprf_float128(env, xt.f128);
3298    float_check_status(env);
3299    putVSR(rD(opcode) + 32, &xt, env);
3300}
3301
3302void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode)
3303{
3304    ppc_vsr_t xb;
3305    ppc_vsr_t xt;
3306    uint8_t r = Rrm(opcode);
3307    uint8_t rmc = RMC(opcode);
3308    uint8_t rmode = 0;
3309    floatx80 round_res;
3310    float_status tstat;
3311
3312    getVSR(rB(opcode) + 32, &xb, env);
3313    memset(&xt, 0, sizeof(xt));
3314    helper_reset_fpstatus(env);
3315
3316    if (r == 0 && rmc == 0) {
3317        rmode = float_round_ties_away;
3318    } else if (r == 0 && rmc == 0x3) {
3319        rmode = fpscr_rn;
3320    } else if (r == 1) {
3321        switch (rmc) {
3322        case 0:
3323            rmode = float_round_nearest_even;
3324            break;
3325        case 1:
3326            rmode = float_round_to_zero;
3327            break;
3328        case 2:
3329            rmode = float_round_up;
3330            break;
3331        case 3:
3332            rmode = float_round_down;
3333            break;
3334        default:
3335            abort();
3336        }
3337    }
3338
3339    tstat = env->fp_status;
3340    set_float_exception_flags(0, &tstat);
3341    set_float_rounding_mode(rmode, &tstat);
3342    round_res = float128_to_floatx80(xb.f128, &tstat);
3343    xt.f128 = floatx80_to_float128(round_res, &tstat);
3344    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3345
3346    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3347        if (float128_is_signaling_nan(xb.f128, &tstat)) {
3348            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
3349            xt.f128 = float128_snan_to_qnan(xt.f128);
3350        }
3351    }
3352
3353    helper_compute_fprf_float128(env, xt.f128);
3354    putVSR(rD(opcode) + 32, &xt, env);
3355    float_check_status(env);
3356}
3357
3358void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode)
3359{
3360    ppc_vsr_t xb;
3361    ppc_vsr_t xt;
3362    float_status tstat;
3363
3364    getVSR(rB(opcode) + 32, &xb, env);
3365    memset(&xt, 0, sizeof(xt));
3366    helper_reset_fpstatus(env);
3367
3368    tstat = env->fp_status;
3369    if (unlikely(Rc(opcode) != 0)) {
3370        tstat.float_rounding_mode = float_round_to_odd;
3371    }
3372
3373    set_float_exception_flags(0, &tstat);
3374    xt.f128 = float128_sqrt(xb.f128, &tstat);
3375    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3376
3377    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3378        if (float128_is_signaling_nan(xb.f128, &tstat)) {
3379            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
3380            xt.f128 = float128_snan_to_qnan(xb.f128);
3381        } else if (float128_is_quiet_nan(xb.f128, &tstat)) {
3382            xt.f128 = xb.f128;
3383        } else if (float128_is_neg(xb.f128) && !float128_is_zero(xb.f128)) {
3384            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
3385            set_snan_bit_is_one(0, &env->fp_status);
3386            xt.f128 = float128_default_nan(&env->fp_status);
3387        }
3388    }
3389
3390    helper_compute_fprf_float128(env, xt.f128);
3391    putVSR(rD(opcode) + 32, &xt, env);
3392    float_check_status(env);
3393}
3394
3395void helper_xssubqp(CPUPPCState *env, uint32_t opcode)
3396{
3397    ppc_vsr_t xt, xa, xb;
3398    float_status tstat;
3399
3400    getVSR(rA(opcode) + 32, &xa, env);
3401    getVSR(rB(opcode) + 32, &xb, env);
3402    getVSR(rD(opcode) + 32, &xt, env);
3403    helper_reset_fpstatus(env);
3404
3405    tstat = env->fp_status;
3406    if (unlikely(Rc(opcode) != 0)) {
3407        tstat.float_rounding_mode = float_round_to_odd;
3408    }
3409
3410    set_float_exception_flags(0, &tstat);
3411    xt.f128 = float128_sub(xa.f128, xb.f128, &tstat);
3412    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3413
3414    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3415        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
3416            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
3417        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
3418                   float128_is_signaling_nan(xb.f128, &tstat)) {
3419            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
3420        }
3421    }
3422
3423    helper_compute_fprf_float128(env, xt.f128);
3424    putVSR(rD(opcode) + 32, &xt, env);
3425    float_check_status(env);
3426}
3427