qemu/target/ppc/fpu_helper.c
   1/*
   2 *  PowerPC floating point and SPE emulation helpers for QEMU.
   3 *
   4 *  Copyright (c) 2003-2007 Jocelyn Mayer
   5 *
   6 * This library is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU Lesser General Public
   8 * License as published by the Free Software Foundation; either
   9 * version 2 of the License, or (at your option) any later version.
  10 *
  11 * This library is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14 * Lesser General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU Lesser General Public
  17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  18 */
  19#include "qemu/osdep.h"
  20#include "cpu.h"
  21#include "exec/helper-proto.h"
  22#include "exec/exec-all.h"
  23#include "internal.h"
  24
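    /*
     * sNaN -> qNaN conversion: set the most-significant fraction bit
     * (the "quiet" bit) while preserving sign, exponent and payload.
     */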
  25static inline float128 float128_snan_to_qnan(float128 x)
  26{
  27    float128 r;
  28
  29    r.high = x.high | 0x0000800000000000;
  30    r.low = x.low;
  31    return r;
  32}
  33
  34#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
  35#define float32_snan_to_qnan(x) ((x) | 0x00400000)
  36#define float16_snan_to_qnan(x) ((x) | 0x0200)
  37
  38/*****************************************************************************/
  39/* Floating point operations helpers */
  40uint64_t helper_float32_to_float64(CPUPPCState *env, uint32_t arg)
  41{
  42    CPU_FloatU f;
  43    CPU_DoubleU d;
  44
  45    f.l = arg;
  46    d.d = float32_to_float64(f.f, &env->fp_status);
  47    return d.ll;
  48}
  49
  50uint32_t helper_float64_to_float32(CPUPPCState *env, uint64_t arg)
  51{
  52    CPU_FloatU f;
  53    CPU_DoubleU d;
  54
  55    d.ll = arg;
  56    f.f = float64_to_float32(d.d, &env->fp_status);
  57    return f.l;
  58}
  59
  60static inline int ppc_float32_get_unbiased_exp(float32 f)
  61{
  62    return ((f >> 23) & 0xFF) - 127;
  63}
  64
  65static inline int ppc_float64_get_unbiased_exp(float64 f)
  66{
  67    return ((f >> 52) & 0x7FF) - 1023;
  68}
  69
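    /*
     * FPSCR[FPRF] is the 5-bit field C || FPCC.  The values set below
     * encode the class of the result:
     *   0x11 quiet NaN        0x09 -infinity       0x05 +infinity
     *   0x12 -zero            0x02 +zero
     *   0x18 -denormalized    0x14 +denormalized
     *   0x08 -normalized      0x04 +normalized
     */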
  70#define COMPUTE_FPRF(tp)                                       \
  71void helper_compute_fprf_##tp(CPUPPCState *env, tp arg)        \
  72{                                                              \
  73    int isneg;                                                 \
  74    int fprf;                                                  \
  75                                                               \
  76    isneg = tp##_is_neg(arg);                                  \
  77    if (unlikely(tp##_is_any_nan(arg))) {                      \
  78        if (tp##_is_signaling_nan(arg, &env->fp_status)) {     \
  79            /* Signaling NaN: flags are undefined */           \
  80            fprf = 0x00;                                       \
  81        } else {                                               \
  82            /* Quiet NaN */                                    \
  83            fprf = 0x11;                                       \
  84        }                                                      \
  85    } else if (unlikely(tp##_is_infinity(arg))) {              \
  86        /* +/- infinity */                                     \
  87        if (isneg) {                                           \
  88            fprf = 0x09;                                       \
  89        } else {                                               \
  90            fprf = 0x05;                                       \
  91        }                                                      \
  92    } else {                                                   \
  93        if (tp##_is_zero(arg)) {                               \
  94            /* +/- zero */                                     \
  95            if (isneg) {                                       \
  96                fprf = 0x12;                                   \
  97            } else {                                           \
  98                fprf = 0x02;                                   \
  99            }                                                  \
 100        } else {                                               \
 101            if (tp##_is_zero_or_denormal(arg)) {               \
 102                /* Denormalized numbers */                     \
 103                fprf = 0x10;                                   \
 104            } else {                                           \
 105                /* Normalized numbers */                       \
 106                fprf = 0x00;                                   \
 107            }                                                  \
 108            if (isneg) {                                       \
 109                fprf |= 0x08;                                  \
 110            } else {                                           \
 111                fprf |= 0x04;                                  \
 112            }                                                  \
 113        }                                                      \
 114    }                                                          \
 115    /* We update FPSCR_FPRF */                                 \
 116    env->fpscr &= ~(0x1F << FPSCR_FPRF);                       \
 117    env->fpscr |= fprf << FPSCR_FPRF;                          \
 118}
 119
 120COMPUTE_FPRF(float16)
 121COMPUTE_FPRF(float32)
 122COMPUTE_FPRF(float64)
 123COMPUTE_FPRF(float128)
 124
 125/* Floating-point invalid operations exception */
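    /*
     * Sets the appropriate FPSCR[VX*] bit and, for the arithmetic-class
     * exceptions, returns the default quiet NaN (0x7FF8000000000000) that
     * the caller stores in the target FPR when FPSCR[VE] is clear.  When
     * FPSCR[VE] is set, a program interrupt is raised instead (deferred
     * for VXVC until the target FPR has been updated).
     */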
 126static inline __attribute__((__always_inline__))
 127uint64_t float_invalid_op_excp(CPUPPCState *env, int op, int set_fpcc)
 128{
 129    CPUState *cs = CPU(ppc_env_get_cpu(env));
 130    uint64_t ret = 0;
 131    int ve;
 132
 133    ve = fpscr_ve;
 134    switch (op) {
 135    case POWERPC_EXCP_FP_VXSNAN:
 136        env->fpscr |= 1 << FPSCR_VXSNAN;
 137        break;
 138    case POWERPC_EXCP_FP_VXSOFT:
 139        env->fpscr |= 1 << FPSCR_VXSOFT;
 140        break;
 141    case POWERPC_EXCP_FP_VXISI:
 142        /* Magnitude subtraction of infinities */
 143        env->fpscr |= 1 << FPSCR_VXISI;
 144        goto update_arith;
 145    case POWERPC_EXCP_FP_VXIDI:
 146        /* Division of infinity by infinity */
 147        env->fpscr |= 1 << FPSCR_VXIDI;
 148        goto update_arith;
 149    case POWERPC_EXCP_FP_VXZDZ:
 150        /* Division of zero by zero */
 151        env->fpscr |= 1 << FPSCR_VXZDZ;
 152        goto update_arith;
 153    case POWERPC_EXCP_FP_VXIMZ:
 154        /* Multiplication of zero by infinity */
 155        env->fpscr |= 1 << FPSCR_VXIMZ;
 156        goto update_arith;
 157    case POWERPC_EXCP_FP_VXVC:
 158        /* Ordered comparison of NaN */
 159        env->fpscr |= 1 << FPSCR_VXVC;
 160        if (set_fpcc) {
 161            env->fpscr &= ~(0xF << FPSCR_FPCC);
 162            env->fpscr |= 0x11 << FPSCR_FPCC;
 163        }
 164        /* We must update the target FPR before raising the exception */
 165        if (ve != 0) {
 166            cs->exception_index = POWERPC_EXCP_PROGRAM;
 167            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
 168            /* Update the floating-point enabled exception summary */
 169            env->fpscr |= 1 << FPSCR_FEX;
 170            /* Exception is deferred */
 171            ve = 0;
 172        }
 173        break;
 174    case POWERPC_EXCP_FP_VXSQRT:
 175        /* Square root of a negative number */
 176        env->fpscr |= 1 << FPSCR_VXSQRT;
 177    update_arith:
 178        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
 179        if (ve == 0) {
 180            /* Set the result to quiet NaN */
 181            ret = 0x7FF8000000000000ULL;
 182            if (set_fpcc) {
 183                env->fpscr &= ~(0xF << FPSCR_FPCC);
 184                env->fpscr |= 0x11 << FPSCR_FPCC;
 185            }
 186        }
 187        break;
 188    case POWERPC_EXCP_FP_VXCVI:
 189        /* Invalid conversion */
 190        env->fpscr |= 1 << FPSCR_VXCVI;
 191        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
 192        if (ve == 0) {
 193            /* Set the result to quiet NaN */
 194            ret = 0x7FF8000000000000ULL;
 195            if (set_fpcc) {
 196                env->fpscr &= ~(0xF << FPSCR_FPCC);
 197                env->fpscr |= 0x11 << FPSCR_FPCC;
 198            }
 199        }
 200        break;
 201    }
 202    /* Update the floating-point invalid operation summary */
 203    env->fpscr |= 1 << FPSCR_VX;
 204    /* Update the floating-point exception summary */
 205    env->fpscr |= FP_FX;
 206    if (ve != 0) {
 207        /* Update the floating-point enabled exception summary */
 208        env->fpscr |= 1 << FPSCR_FEX;
 209        if (msr_fe0 != 0 || msr_fe1 != 0) {
 210            /* GETPC() works here because this is inline */
 211            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
 212                                   POWERPC_EXCP_FP | op, GETPC());
 213        }
 214    }
 215    return ret;
 216}
 217
 218static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
 219{
 220    env->fpscr |= 1 << FPSCR_ZX;
 221    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
 222    /* Update the floating-point exception summary */
 223    env->fpscr |= FP_FX;
 224    if (fpscr_ze != 0) {
 225        /* Update the floating-point enabled exception summary */
 226        env->fpscr |= 1 << FPSCR_FEX;
 227        if (msr_fe0 != 0 || msr_fe1 != 0) {
 228            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
 229                                   POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
 230                                   raddr);
 231        }
 232    }
 233}
 234
 235static inline void float_overflow_excp(CPUPPCState *env)
 236{
 237    CPUState *cs = CPU(ppc_env_get_cpu(env));
 238
 239    env->fpscr |= 1 << FPSCR_OX;
 240    /* Update the floating-point exception summary */
 241    env->fpscr |= FP_FX;
 242    if (fpscr_oe != 0) {
 243        /* XXX: should adjust the result */
 244        /* Update the floating-point enabled exception summary */
 245        env->fpscr |= 1 << FPSCR_FEX;
 246        /* We must update the target FPR before raising the exception */
 247        cs->exception_index = POWERPC_EXCP_PROGRAM;
 248        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
 249    } else {
 250        env->fpscr |= 1 << FPSCR_XX;
 251        env->fpscr |= 1 << FPSCR_FI;
 252    }
 253}
 254
 255static inline void float_underflow_excp(CPUPPCState *env)
 256{
 257    CPUState *cs = CPU(ppc_env_get_cpu(env));
 258
 259    env->fpscr |= 1 << FPSCR_UX;
 260    /* Update the floating-point exception summary */
 261    env->fpscr |= FP_FX;
 262    if (fpscr_ue != 0) {
 263        /* XXX: should adjust the result */
 264        /* Update the floating-point enabled exception summary */
 265        env->fpscr |= 1 << FPSCR_FEX;
 266        /* We must update the target FPR before raising the exception */
 267        cs->exception_index = POWERPC_EXCP_PROGRAM;
 268        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
 269    }
 270}
 271
 272static inline void float_inexact_excp(CPUPPCState *env)
 273{
 274    CPUState *cs = CPU(ppc_env_get_cpu(env));
 275
 276    env->fpscr |= 1 << FPSCR_XX;
 277    /* Update the floating-point exception summary */
 278    env->fpscr |= FP_FX;
 279    if (fpscr_xe != 0) {
 280        /* Update the floating-point enabled exception summary */
 281        env->fpscr |= 1 << FPSCR_FEX;
 282        /* We must update the target FPR before raising the exception */
 283        cs->exception_index = POWERPC_EXCP_PROGRAM;
 284        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
 285    }
 286}
 287
 288static inline void fpscr_set_rounding_mode(CPUPPCState *env)
 289{
 290    int rnd_type;
 291
 292    /* Set rounding mode */
 293    switch (fpscr_rn) {
 294    case 0:
 295        /* Best approximation (round to nearest) */
 296        rnd_type = float_round_nearest_even;
 297        break;
 298    case 1:
 299        /* Smaller magnitude (round toward zero) */
 300        rnd_type = float_round_to_zero;
 301        break;
 302    case 2:
 303        /* Round toward +infinity */
 304        rnd_type = float_round_up;
 305        break;
 306    default:
 307    case 3:
 308        /* Round toward -infinity */
 309        rnd_type = float_round_down;
 310        break;
 311    }
 312    set_float_rounding_mode(rnd_type, &env->fp_status);
 313}
 314
 315void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
 316{
 317    int prev;
 318
 319    prev = (env->fpscr >> bit) & 1;
 320    env->fpscr &= ~(1 << bit);
 321    if (prev == 1) {
 322        switch (bit) {
 323        case FPSCR_RN1:
 324        case FPSCR_RN:
 325            fpscr_set_rounding_mode(env);
 326            break;
 327        default:
 328            break;
 329        }
 330    }
 331}
 332
 333void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
 334{
 335    CPUState *cs = CPU(ppc_env_get_cpu(env));
 336    int prev;
 337
 338    prev = (env->fpscr >> bit) & 1;
 339    env->fpscr |= 1 << bit;
 340    if (prev == 0) {
 341        switch (bit) {
 342        case FPSCR_VX:
 343            env->fpscr |= FP_FX;
 344            if (fpscr_ve) {
 345                goto raise_ve;
 346            }
 347            break;
 348        case FPSCR_OX:
 349            env->fpscr |= FP_FX;
 350            if (fpscr_oe) {
 351                goto raise_oe;
 352            }
 353            break;
 354        case FPSCR_UX:
 355            env->fpscr |= FP_FX;
 356            if (fpscr_ue) {
 357                goto raise_ue;
 358            }
 359            break;
 360        case FPSCR_ZX:
 361            env->fpscr |= FP_FX;
 362            if (fpscr_ze) {
 363                goto raise_ze;
 364            }
 365            break;
 366        case FPSCR_XX:
 367            env->fpscr |= FP_FX;
 368            if (fpscr_xe) {
 369                goto raise_xe;
 370            }
 371            break;
 372        case FPSCR_VXSNAN:
 373        case FPSCR_VXISI:
 374        case FPSCR_VXIDI:
 375        case FPSCR_VXZDZ:
 376        case FPSCR_VXIMZ:
 377        case FPSCR_VXVC:
 378        case FPSCR_VXSOFT:
 379        case FPSCR_VXSQRT:
 380        case FPSCR_VXCVI:
 381            env->fpscr |= 1 << FPSCR_VX;
 382            env->fpscr |= FP_FX;
 383            if (fpscr_ve != 0) {
 384                goto raise_ve;
 385            }
 386            break;
 387        case FPSCR_VE:
 388            if (fpscr_vx != 0) {
 389            raise_ve:
 390                env->error_code = POWERPC_EXCP_FP;
 391                if (fpscr_vxsnan) {
 392                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
 393                }
 394                if (fpscr_vxisi) {
 395                    env->error_code |= POWERPC_EXCP_FP_VXISI;
 396                }
 397                if (fpscr_vxidi) {
 398                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
 399                }
 400                if (fpscr_vxzdz) {
 401                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
 402                }
 403                if (fpscr_vximz) {
 404                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
 405                }
 406                if (fpscr_vxvc) {
 407                    env->error_code |= POWERPC_EXCP_FP_VXVC;
 408                }
 409                if (fpscr_vxsoft) {
 410                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
 411                }
 412                if (fpscr_vxsqrt) {
 413                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
 414                }
 415                if (fpscr_vxcvi) {
 416                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
 417                }
 418                goto raise_excp;
 419            }
 420            break;
 421        case FPSCR_OE:
 422            if (fpscr_ox != 0) {
 423            raise_oe:
 424                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
 425                goto raise_excp;
 426            }
 427            break;
 428        case FPSCR_UE:
 429            if (fpscr_ux != 0) {
 430            raise_ue:
 431                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
 432                goto raise_excp;
 433            }
 434            break;
 435        case FPSCR_ZE:
 436            if (fpscr_zx != 0) {
 437            raise_ze:
 438                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
 439                goto raise_excp;
 440            }
 441            break;
 442        case FPSCR_XE:
 443            if (fpscr_xx != 0) {
 444            raise_xe:
 445                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
 446                goto raise_excp;
 447            }
 448            break;
 449        case FPSCR_RN1:
 450        case FPSCR_RN:
 451            fpscr_set_rounding_mode(env);
 452            break;
 453        default:
 454            break;
 455        raise_excp:
 456            /* Update the floating-point enabled exception summary */
 457            env->fpscr |= 1 << FPSCR_FEX;
 458            /* We have to update Rc1 before raising the exception */
 459            cs->exception_index = POWERPC_EXCP_PROGRAM;
 460            break;
 461        }
 462    }
 463}
 464
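    /*
     * mtfsf: each bit of 'mask' enables one 4-bit nibble of the FPSCR.
     * FEX and VX (mask 0x60000000) are never copied from the source
     * value; they are recomputed below from the individual exception
     * and enable bits.
     */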
 465void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
 466{
 467    CPUState *cs = CPU(ppc_env_get_cpu(env));
 468    target_ulong prev, new;
 469    int i;
 470
 471    prev = env->fpscr;
 472    new = (target_ulong)arg;
 473    new &= ~0x60000000LL;
 474    new |= prev & 0x60000000LL;
 475    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
 476        if (mask & (1 << i)) {
 477            env->fpscr &= ~(0xFLL << (4 * i));
 478            env->fpscr |= new & (0xFLL << (4 * i));
 479        }
 480    }
 481    /* Update VX and FEX */
 482    if (fpscr_ix != 0) {
 483        env->fpscr |= 1 << FPSCR_VX;
 484    } else {
 485        env->fpscr &= ~(1 << FPSCR_VX);
 486    }
 487    if ((fpscr_ex & fpscr_eex) != 0) {
 488        env->fpscr |= 1 << FPSCR_FEX;
 489        cs->exception_index = POWERPC_EXCP_PROGRAM;
 490        /* XXX: we should compute it properly */
 491        env->error_code = POWERPC_EXCP_FP;
 492    } else {
 493        env->fpscr &= ~(1 << FPSCR_FEX);
 494    }
 495    fpscr_set_rounding_mode(env);
 496}
 497
 498void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
 499{
 500    helper_store_fpscr(env, arg, mask);
 501}
 502
 503static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
 504{
 505    CPUState *cs = CPU(ppc_env_get_cpu(env));
 506    int status = get_float_exception_flags(&env->fp_status);
 507
 508    if (status & float_flag_divbyzero) {
 509        float_zero_divide_excp(env, raddr);
 510    } else if (status & float_flag_overflow) {
 511        float_overflow_excp(env);
 512    } else if (status & float_flag_underflow) {
 513        float_underflow_excp(env);
 514    } else if (status & float_flag_inexact) {
 515        float_inexact_excp(env);
 516    }
 517
 518    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
 519        (env->error_code & POWERPC_EXCP_FP)) {
 520        /* Deferred floating-point exception after target FPR update */
 521        if (msr_fe0 != 0 || msr_fe1 != 0) {
 522            raise_exception_err_ra(env, cs->exception_index,
 523                                   env->error_code, raddr);
 524        }
 525    }
 526}
 527
 528static inline __attribute__((__always_inline__))
 529void float_check_status(CPUPPCState *env)
 530{
 531    /* GETPC() works here because this is inline */
 532    do_float_check_status(env, GETPC());
 533}
 534
 535void helper_float_check_status(CPUPPCState *env)
 536{
 537    do_float_check_status(env, GETPC());
 538}
 539
 540void helper_reset_fpstatus(CPUPPCState *env)
 541{
 542    set_float_exception_flags(0, &env->fp_status);
 543}
 544
 545/* fadd - fadd. */
 546uint64_t helper_fadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
 547{
 548    CPU_DoubleU farg1, farg2;
 549
 550    farg1.ll = arg1;
 551    farg2.ll = arg2;
 552
 553    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
 554                 float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
 555        /* Magnitude subtraction of infinities */
 556        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
 557    } else {
 558        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
 559                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
 560            /* sNaN addition */
 561            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
 562        }
 563        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
 564    }
 565
 566    return farg1.ll;
 567}
 568
 569/* fsub - fsub. */
 570uint64_t helper_fsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
 571{
 572    CPU_DoubleU farg1, farg2;
 573
 574    farg1.ll = arg1;
 575    farg2.ll = arg2;
 576
 577    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
 578                 float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
 579        /* Magnitude subtraction of infinities */
 580        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
 581    } else {
 582        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
 583                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
 584            /* sNaN subtraction */
 585            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
 586        }
 587        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
 588    }
 589
 590    return farg1.ll;
 591}
 592
 593/* fmul - fmul. */
 594uint64_t helper_fmul(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
 595{
 596    CPU_DoubleU farg1, farg2;
 597
 598    farg1.ll = arg1;
 599    farg2.ll = arg2;
 600
 601    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
 602                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
 603        /* Multiplication of zero by infinity */
 604        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
 605    } else {
 606        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
 607                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
 608            /* sNaN multiplication */
 609            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
 610        }
 611        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
 612    }
 613
 614    return farg1.ll;
 615}
 616
 617/* fdiv - fdiv. */
 618uint64_t helper_fdiv(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
 619{
 620    CPU_DoubleU farg1, farg2;
 621
 622    farg1.ll = arg1;
 623    farg2.ll = arg2;
 624
 625    if (unlikely(float64_is_infinity(farg1.d) &&
 626                 float64_is_infinity(farg2.d))) {
 627        /* Division of infinity by infinity */
 628        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
 629    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
 630        /* Division of zero by zero */
 631        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
 632    } else {
 633        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
 634                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
 635            /* sNaN division */
 636            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
 637        }
 638        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
 639    }
 640
 641    return farg1.ll;
 642}
 643
 644
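    /*
     * Float-to-integer conversions (fctiw and friends):
     *   cvt    - softfloat float64-to-integer conversion to use
     *   nanval - result substituted when the source is a NaN
     *            (most negative integer for signed, zero for unsigned)
     */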
 645#define FPU_FCTI(op, cvt, nanval)                                      \
 646uint64_t helper_##op(CPUPPCState *env, uint64_t arg)                   \
 647{                                                                      \
 648    CPU_DoubleU farg;                                                  \
 649                                                                       \
 650    farg.ll = arg;                                                     \
 651    farg.ll = float64_to_##cvt(farg.d, &env->fp_status);               \
 652                                                                       \
 653    if (unlikely(env->fp_status.float_exception_flags)) {              \
 654        if (float64_is_any_nan(arg)) {                                 \
 655            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);      \
 656            if (float64_is_signaling_nan(arg, &env->fp_status)) {      \
 657                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); \
 658            }                                                          \
 659            farg.ll = nanval;                                          \
 660        } else if (env->fp_status.float_exception_flags &              \
 661                   float_flag_invalid) {                               \
 662            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);      \
 663        }                                                              \
 664        float_check_status(env);                                       \
 665    }                                                                  \
 666    return farg.ll;                                                    \
 667}
 668
 669FPU_FCTI(fctiw, int32, 0x80000000U)
 670FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
 671FPU_FCTI(fctiwu, uint32, 0x00000000U)
 672FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
 673FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
 674FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
 675FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
 676FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
 677
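    /*
     * Integer-to-float conversions (fcfid and friends):
     *   cvtr      - softfloat integer-to-float conversion to use
     *   is_single - fcfids/fcfidus round to single precision first and
     *               then widen the result to the float64 register format
     */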
 678#define FPU_FCFI(op, cvtr, is_single)                      \
 679uint64_t helper_##op(CPUPPCState *env, uint64_t arg)       \
 680{                                                          \
 681    CPU_DoubleU farg;                                      \
 682                                                           \
 683    if (is_single) {                                       \
 684        float32 tmp = cvtr(arg, &env->fp_status);          \
 685        farg.d = float32_to_float64(tmp, &env->fp_status); \
 686    } else {                                               \
 687        farg.d = cvtr(arg, &env->fp_status);               \
 688    }                                                      \
 689    float_check_status(env);                               \
 690    return farg.ll;                                        \
 691}
 692
 693FPU_FCFI(fcfid, int64_to_float64, 0)
 694FPU_FCFI(fcfids, int64_to_float32, 1)
 695FPU_FCFI(fcfidu, uint64_to_float64, 0)
 696FPU_FCFI(fcfidus, uint64_to_float32, 1)
 697
 698static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
 699                              int rounding_mode)
 700{
 701    CPU_DoubleU farg;
 702
 703    farg.ll = arg;
 704
 705    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
 706        /* sNaN round */
 707        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
 708        farg.ll = arg | 0x0008000000000000ULL;
 709    } else {
 710        int inexact = get_float_exception_flags(&env->fp_status) &
 711                      float_flag_inexact;
 712        set_float_rounding_mode(rounding_mode, &env->fp_status);
 713        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
 714        /* Restore rounding mode from FPSCR */
 715        fpscr_set_rounding_mode(env);
 716
 717        /* fri* does not set FPSCR[XX] */
 718        if (!inexact) {
 719            env->fp_status.float_exception_flags &= ~float_flag_inexact;
 720        }
 721    }
 722    float_check_status(env);
 723    return farg.ll;
 724}
 725
 726uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
 727{
 728    return do_fri(env, arg, float_round_ties_away);
 729}
 730
 731uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
 732{
 733    return do_fri(env, arg, float_round_to_zero);
 734}
 735
 736uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
 737{
 738    return do_fri(env, arg, float_round_up);
 739}
 740
 741uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
 742{
 743    return do_fri(env, arg, float_round_down);
 744}
 745
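    /*
     * Raise the VX* exceptions corresponding to an invalid fused
     * multiply-add: sNaN operands, infinity * zero, and the effective
     * addition of infinities with opposite signs (taking the
     * float_muladd_negate_c flag into account).
     */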
 746#define FPU_MADDSUB_UPDATE(NAME, TP)                                    \
 747static void NAME(CPUPPCState *env, TP arg1, TP arg2, TP arg3,           \
 748                 unsigned int madd_flags)                               \
 749{                                                                       \
 750    if (TP##_is_signaling_nan(arg1, &env->fp_status) ||                 \
 751        TP##_is_signaling_nan(arg2, &env->fp_status) ||                 \
 752        TP##_is_signaling_nan(arg3, &env->fp_status)) {                 \
 753        /* sNaN operation */                                            \
 754        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);          \
 755    }                                                                   \
 756    if ((TP##_is_infinity(arg1) && TP##_is_zero(arg2)) ||               \
 757        (TP##_is_zero(arg1) && TP##_is_infinity(arg2))) {               \
 758        /* Multiplication of zero by infinity */                        \
 759        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);           \
 760    }                                                                   \
 761    if ((TP##_is_infinity(arg1) || TP##_is_infinity(arg2)) &&           \
 762        TP##_is_infinity(arg3)) {                                       \
 763        uint8_t aSign, bSign, cSign;                                    \
 764                                                                        \
 765        aSign = TP##_is_neg(arg1);                                      \
 766        bSign = TP##_is_neg(arg2);                                      \
 767        cSign = TP##_is_neg(arg3);                                      \
 768        if (madd_flags & float_muladd_negate_c) {                       \
 769            cSign ^= 1;                                                 \
 770        }                                                               \
 771        if (aSign ^ bSign ^ cSign) {                                    \
 772            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);       \
 773        }                                                               \
 774    }                                                                   \
 775}
 776FPU_MADDSUB_UPDATE(float32_maddsub_update_excp, float32)
 777FPU_MADDSUB_UPDATE(float64_maddsub_update_excp, float64)
 778
 779#define FPU_FMADD(op, madd_flags)                                       \
 780uint64_t helper_##op(CPUPPCState *env, uint64_t arg1,                   \
 781                     uint64_t arg2, uint64_t arg3)                      \
 782{                                                                       \
 783    uint32_t flags;                                                     \
 784    float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags,          \
 785                                 &env->fp_status);                      \
 786    flags = get_float_exception_flags(&env->fp_status);                 \
 787    if (flags) {                                                        \
 788        if (flags & float_flag_invalid) {                               \
 789            float64_maddsub_update_excp(env, arg1, arg2, arg3,          \
 790                                        madd_flags);                    \
 791        }                                                               \
 792        float_check_status(env);                                        \
 793    }                                                                   \
 794    return ret;                                                         \
 795}
 796
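    /*
     * fmadd computes a*b + c, fmsub negates c (a*b - c), and the
     * fnmadd/fnmsub forms additionally negate the final result.
     */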
 797#define MADD_FLGS 0
 798#define MSUB_FLGS float_muladd_negate_c
 799#define NMADD_FLGS float_muladd_negate_result
 800#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)
 801
 802FPU_FMADD(fmadd, MADD_FLGS)
 803FPU_FMADD(fnmadd, NMADD_FLGS)
 804FPU_FMADD(fmsub, MSUB_FLGS)
 805FPU_FMADD(fnmsub, NMSUB_FLGS)
 806
 807/* frsp - frsp. */
 808uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
 809{
 810    CPU_DoubleU farg;
 811    float32 f32;
 812
 813    farg.ll = arg;
 814
 815    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
 816        /* sNaN rounding to single precision */
 817        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
 818    }
 819    f32 = float64_to_float32(farg.d, &env->fp_status);
 820    farg.d = float32_to_float64(f32, &env->fp_status);
 821
 822    return farg.ll;
 823}
 824
 825/* fsqrt - fsqrt. */
 826uint64_t helper_fsqrt(CPUPPCState *env, uint64_t arg)
 827{
 828    CPU_DoubleU farg;
 829
 830    farg.ll = arg;
 831
 832    if (unlikely(float64_is_any_nan(farg.d))) {
 833        if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
 834            /* sNaN square root */
 835            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
 836            farg.ll = float64_snan_to_qnan(farg.ll);
 837        }
 838    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
 839        /* Square root of a negative nonzero number */
 840        farg.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
 841    } else {
 842        farg.d = float64_sqrt(farg.d, &env->fp_status);
 843    }
 844    return farg.ll;
 845}
 846
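    /*
     * The reciprocal and reciprocal-square-root estimate instructions
     * (fre, fres, frsqrte) are implemented here with full-precision
     * division and square root; the exact result satisfies the
     * architecture's estimate accuracy requirements.
     */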
 847/* fre - fre. */
 848uint64_t helper_fre(CPUPPCState *env, uint64_t arg)
 849{
 850    CPU_DoubleU farg;
 851
 852    farg.ll = arg;
 853
 854    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
 855        /* sNaN reciprocal */
 856        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
 857    }
 858    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
 859    return farg.ll;
 860}
 861
 862/* fres - fres. */
 863uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
 864{
 865    CPU_DoubleU farg;
 866    float32 f32;
 867
 868    farg.ll = arg;
 869
 870    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
 871        /* sNaN reciprocal */
 872        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
 873    }
 874    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
 875    f32 = float64_to_float32(farg.d, &env->fp_status);
 876    farg.d = float32_to_float64(f32, &env->fp_status);
 877
 878    return farg.ll;
 879}
 880
 881/* frsqrte  - frsqrte. */
 882uint64_t helper_frsqrte(CPUPPCState *env, uint64_t arg)
 883{
 884    CPU_DoubleU farg;
 885
 886    farg.ll = arg;
 887
 888    if (unlikely(float64_is_any_nan(farg.d))) {
 889        if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
 890            /* sNaN reciprocal square root */
 891            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
 892            farg.ll = float64_snan_to_qnan(farg.ll);
 893        }
 894    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
 895        /* Reciprocal square root of a negative nonzero number */
 896        farg.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
 897    } else {
 898        farg.d = float64_sqrt(farg.d, &env->fp_status);
 899        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
 900    }
 901
 902    return farg.ll;
 903}
 904
 905/* fsel - fsel. */
 906uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
 907                     uint64_t arg3)
 908{
 909    CPU_DoubleU farg1;
 910
 911    farg1.ll = arg1;
 912
 913    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
 914        !float64_is_any_nan(farg1.d)) {
 915        return arg2;
 916    } else {
 917        return arg3;
 918    }
 919}
 920
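    /*
     * ftdiv/ftsqrt (floating test for software divide/square root) check
     * whether a software estimate sequence can safely be used for the
     * given operands; the value placed in the CR field by the caller is
     * 0b1000 | fg_flag << 2 | fe_flag << 1.
     */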
 921uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
 922{
 923    int fe_flag = 0;
 924    int fg_flag = 0;
 925
 926    if (unlikely(float64_is_infinity(fra) ||
 927                 float64_is_infinity(frb) ||
 928                 float64_is_zero(frb))) {
 929        fe_flag = 1;
 930        fg_flag = 1;
 931    } else {
 932        int e_a = ppc_float64_get_unbiased_exp(fra);
 933        int e_b = ppc_float64_get_unbiased_exp(frb);
 934
 935        if (unlikely(float64_is_any_nan(fra) ||
 936                     float64_is_any_nan(frb))) {
 937            fe_flag = 1;
 938        } else if ((e_b <= -1022) || (e_b >= 1021)) {
 939            fe_flag = 1;
 940        } else if (!float64_is_zero(fra) &&
 941                   (((e_a - e_b) >= 1023) ||
 942                    ((e_a - e_b) <= -1021) ||
 943                    (e_a <= -970))) {
 944            fe_flag = 1;
 945        }
 946
 947        if (unlikely(float64_is_zero_or_denormal(frb))) {
 948            /* XB is not zero because of the above check and */
 949            /* so must be denormalized.                      */
 950            fg_flag = 1;
 951        }
 952    }
 953
 954    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
 955}
 956
 957uint32_t helper_ftsqrt(uint64_t frb)
 958{
 959    int fe_flag = 0;
 960    int fg_flag = 0;
 961
 962    if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
 963        fe_flag = 1;
 964        fg_flag = 1;
 965    } else {
 966        int e_b = ppc_float64_get_unbiased_exp(frb);
 967
 968        if (unlikely(float64_is_any_nan(frb))) {
 969            fe_flag = 1;
 970        } else if (unlikely(float64_is_zero(frb))) {
 971            fe_flag = 1;
 972        } else if (unlikely(float64_is_neg(frb))) {
 973            fe_flag = 1;
 974        } else if (!float64_is_zero(frb) && (e_b <= (-1022+52))) {
 975            fe_flag = 1;
 976        }
 977
 978        if (unlikely(float64_is_zero_or_denormal(frb))) {
 979            /* XB is not zero because of the above check and */
 980            /* therefore must be denormalized.               */
 981            fg_flag = 1;
 982        }
 983    }
 984
 985    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
 986}
 987
 988void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
 989                  uint32_t crfD)
 990{
 991    CPU_DoubleU farg1, farg2;
 992    uint32_t ret = 0;
 993
 994    farg1.ll = arg1;
 995    farg2.ll = arg2;
 996
 997    if (unlikely(float64_is_any_nan(farg1.d) ||
 998                 float64_is_any_nan(farg2.d))) {
 999        ret = 0x01UL;
1000    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1001        ret = 0x08UL;
1002    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1003        ret = 0x04UL;
1004    } else {
1005        ret = 0x02UL;
1006    }
1007
1008    env->fpscr &= ~(0x0F << FPSCR_FPRF);
1009    env->fpscr |= ret << FPSCR_FPRF;
1010    env->crf[crfD] = ret;
1011    if (unlikely(ret == 0x01UL
1012                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
1013                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
1014        /* sNaN comparison */
1015        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
1016    }
1017}
1018
1019void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
1020                  uint32_t crfD)
1021{
1022    CPU_DoubleU farg1, farg2;
1023    uint32_t ret = 0;
1024
1025    farg1.ll = arg1;
1026    farg2.ll = arg2;
1027
1028    if (unlikely(float64_is_any_nan(farg1.d) ||
1029                 float64_is_any_nan(farg2.d))) {
1030        ret = 0x01UL;
1031    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1032        ret = 0x08UL;
1033    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1034        ret = 0x04UL;
1035    } else {
1036        ret = 0x02UL;
1037    }
1038
1039    env->fpscr &= ~(0x0F << FPSCR_FPRF);
1040    env->fpscr |= ret << FPSCR_FPRF;
1041    env->crf[crfD] = ret;
1042    if (unlikely(ret == 0x01UL)) {
1043        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
1044            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
1045            /* sNaN comparison */
1046            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
1047                                  POWERPC_EXCP_FP_VXVC, 1);
1048        } else {
1049            /* qNaN comparison */
1050            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 1);
1051        }
1052    }
1053}
1054
1055/* Single-precision floating-point conversions */
1056static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
1057{
1058    CPU_FloatU u;
1059
1060    u.f = int32_to_float32(val, &env->vec_status);
1061
1062    return u.l;
1063}
1064
1065static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
1066{
1067    CPU_FloatU u;
1068
1069    u.f = uint32_to_float32(val, &env->vec_status);
1070
1071    return u.l;
1072}
1073
1074static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
1075{
1076    CPU_FloatU u;
1077
1078    u.l = val;
1079    /* NaNs are not treated the same way IEEE 754 does */
1080    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1081        return 0;
1082    }
1083
1084    return float32_to_int32(u.f, &env->vec_status);
1085}
1086
1087static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
1088{
1089    CPU_FloatU u;
1090
1091    u.l = val;
1092    /* NaNs are not treated the same way IEEE 754 does */
1093    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1094        return 0;
1095    }
1096
1097    return float32_to_uint32(u.f, &env->vec_status);
1098}
1099
1100static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
1101{
1102    CPU_FloatU u;
1103
1104    u.l = val;
1105    /* NaNs are not treated the same way IEEE 754 does */
1106    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1107        return 0;
1108    }
1109
1110    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
1111}
1112
1113static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
1114{
1115    CPU_FloatU u;
1116
1117    u.l = val;
1118    /* NaNs are not treated the same way IEEE 754 does */
1119    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1120        return 0;
1121    }
1122
1123    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
1124}
1125
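    /*
     * The *cfsf/*cfuf and *ctsf/*ctuf forms convert to/from signed and
     * unsigned 32-bit fractional values, hence the scaling by 2^32
     * around the integer conversion.
     */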
1126static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
1127{
1128    CPU_FloatU u;
1129    float32 tmp;
1130
1131    u.f = int32_to_float32(val, &env->vec_status);
1132    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
1133    u.f = float32_div(u.f, tmp, &env->vec_status);
1134
1135    return u.l;
1136}
1137
1138static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
1139{
1140    CPU_FloatU u;
1141    float32 tmp;
1142
1143    u.f = uint32_to_float32(val, &env->vec_status);
1144    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
1145    u.f = float32_div(u.f, tmp, &env->vec_status);
1146
1147    return u.l;
1148}
1149
1150static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
1151{
1152    CPU_FloatU u;
1153    float32 tmp;
1154
1155    u.l = val;
1156    /* NaNs are not treated the same way IEEE 754 does */
1157    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1158        return 0;
1159    }
1160    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
1161    u.f = float32_mul(u.f, tmp, &env->vec_status);
1162
1163    return float32_to_int32(u.f, &env->vec_status);
1164}
1165
1166static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
1167{
1168    CPU_FloatU u;
1169    float32 tmp;
1170
1171    u.l = val;
1172    /* NaNs are not treated the same way IEEE 754 does */
1173    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1174        return 0;
1175    }
1176    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
1177    u.f = float32_mul(u.f, tmp, &env->vec_status);
1178
1179    return float32_to_uint32(u.f, &env->vec_status);
1180}
1181
1182#define HELPER_SPE_SINGLE_CONV(name)                              \
1183    uint32_t helper_e##name(CPUPPCState *env, uint32_t val)       \
1184    {                                                             \
1185        return e##name(env, val);                                 \
1186    }
1187/* efscfsi */
1188HELPER_SPE_SINGLE_CONV(fscfsi);
1189/* efscfui */
1190HELPER_SPE_SINGLE_CONV(fscfui);
1191/* efscfuf */
1192HELPER_SPE_SINGLE_CONV(fscfuf);
1193/* efscfsf */
1194HELPER_SPE_SINGLE_CONV(fscfsf);
1195/* efsctsi */
1196HELPER_SPE_SINGLE_CONV(fsctsi);
1197/* efsctui */
1198HELPER_SPE_SINGLE_CONV(fsctui);
1199/* efsctsiz */
1200HELPER_SPE_SINGLE_CONV(fsctsiz);
1201/* efsctuiz */
1202HELPER_SPE_SINGLE_CONV(fsctuiz);
1203/* efsctsf */
1204HELPER_SPE_SINGLE_CONV(fsctsf);
1205/* efsctuf */
1206HELPER_SPE_SINGLE_CONV(fsctuf);
1207
1208#define HELPER_SPE_VECTOR_CONV(name)                            \
1209    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val)    \
1210    {                                                           \
1211        return ((uint64_t)e##name(env, val >> 32) << 32) |      \
1212            (uint64_t)e##name(env, val);                        \
1213    }
1214/* evfscfsi */
1215HELPER_SPE_VECTOR_CONV(fscfsi);
1216/* evfscfui */
1217HELPER_SPE_VECTOR_CONV(fscfui);
1218/* evfscfuf */
1219HELPER_SPE_VECTOR_CONV(fscfuf);
1220/* evfscfsf */
1221HELPER_SPE_VECTOR_CONV(fscfsf);
1222/* evfsctsi */
1223HELPER_SPE_VECTOR_CONV(fsctsi);
1224/* evfsctui */
1225HELPER_SPE_VECTOR_CONV(fsctui);
1226/* evfsctsiz */
1227HELPER_SPE_VECTOR_CONV(fsctsiz);
1228/* evfsctuiz */
1229HELPER_SPE_VECTOR_CONV(fsctuiz);
1230/* evfsctsf */
1231HELPER_SPE_VECTOR_CONV(fsctsf);
1232/* evfsctuf */
1233HELPER_SPE_VECTOR_CONV(fsctuf);
1234
1235/* Single-precision floating-point arithmetic */
1236static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
1237{
1238    CPU_FloatU u1, u2;
1239
1240    u1.l = op1;
1241    u2.l = op2;
1242    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
1243    return u1.l;
1244}
1245
1246static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
1247{
1248    CPU_FloatU u1, u2;
1249
1250    u1.l = op1;
1251    u2.l = op2;
1252    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
1253    return u1.l;
1254}
1255
1256static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
1257{
1258    CPU_FloatU u1, u2;
1259
1260    u1.l = op1;
1261    u2.l = op2;
1262    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
1263    return u1.l;
1264}
1265
1266static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
1267{
1268    CPU_FloatU u1, u2;
1269
1270    u1.l = op1;
1271    u2.l = op2;
1272    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
1273    return u1.l;
1274}
1275
1276#define HELPER_SPE_SINGLE_ARITH(name)                                   \
1277    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
1278    {                                                                   \
1279        return e##name(env, op1, op2);                                  \
1280    }
1281/* efsadd */
1282HELPER_SPE_SINGLE_ARITH(fsadd);
1283/* efssub */
1284HELPER_SPE_SINGLE_ARITH(fssub);
1285/* efsmul */
1286HELPER_SPE_SINGLE_ARITH(fsmul);
1287/* efsdiv */
1288HELPER_SPE_SINGLE_ARITH(fsdiv);
1289
1290#define HELPER_SPE_VECTOR_ARITH(name)                                   \
1291    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
1292    {                                                                   \
1293        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |   \
1294            (uint64_t)e##name(env, op1, op2);                           \
1295    }
1296/* evfsadd */
1297HELPER_SPE_VECTOR_ARITH(fsadd);
1298/* evfssub */
1299HELPER_SPE_VECTOR_ARITH(fssub);
1300/* evfsmul */
1301HELPER_SPE_VECTOR_ARITH(fsmul);
1302/* evfsdiv */
1303HELPER_SPE_VECTOR_ARITH(fsdiv);
1304
1305/* Single-precision floating-point comparisons */
1306static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1307{
1308    CPU_FloatU u1, u2;
1309
1310    u1.l = op1;
1311    u2.l = op2;
1312    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
1313}
1314
1315static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1316{
1317    CPU_FloatU u1, u2;
1318
1319    u1.l = op1;
1320    u2.l = op2;
1321    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
1322}
1323
1324static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
1325{
1326    CPU_FloatU u1, u2;
1327
1328    u1.l = op1;
1329    u2.l = op2;
1330    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
1331}
1332
1333static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1334{
1335    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
1336    return efscmplt(env, op1, op2);
1337}
1338
1339static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1340{
1341    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
1342    return efscmpgt(env, op1, op2);
1343}
1344
1345static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
1346{
1347    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
1348    return efscmpeq(env, op1, op2);
1349}
1350
1351#define HELPER_SINGLE_SPE_CMP(name)                                     \
1352    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
1353    {                                                                   \
1354        return e##name(env, op1, op2);                                  \
1355    }
1356/* efststlt */
1357HELPER_SINGLE_SPE_CMP(fststlt);
1358/* efststgt */
1359HELPER_SINGLE_SPE_CMP(fststgt);
1360/* efststeq */
1361HELPER_SINGLE_SPE_CMP(fststeq);
1362/* efscmplt */
1363HELPER_SINGLE_SPE_CMP(fscmplt);
1364/* efscmpgt */
1365HELPER_SINGLE_SPE_CMP(fscmpgt);
1366/* efscmpeq */
1367HELPER_SINGLE_SPE_CMP(fscmpeq);
1368
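    /*
     * Merge the per-element compare results into one CR field:
     * bit 3 = high element, bit 2 = low element, bit 1 = either,
     * bit 0 = both.
     */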
1369static inline uint32_t evcmp_merge(int t0, int t1)
1370{
1371    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
1372}
1373
1374#define HELPER_VECTOR_SPE_CMP(name)                                     \
1375    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
1376    {                                                                   \
1377        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),          \
1378                           e##name(env, op1, op2));                     \
1379    }
1380/* evfststlt */
1381HELPER_VECTOR_SPE_CMP(fststlt);
1382/* evfststgt */
1383HELPER_VECTOR_SPE_CMP(fststgt);
1384/* evfststeq */
1385HELPER_VECTOR_SPE_CMP(fststeq);
1386/* evfscmplt */
1387HELPER_VECTOR_SPE_CMP(fscmplt);
1388/* evfscmpgt */
1389HELPER_VECTOR_SPE_CMP(fscmpgt);
1390/* evfscmpeq */
1391HELPER_VECTOR_SPE_CMP(fscmpeq);
1392
1393/* Double-precision floating-point conversions */
1394uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
1395{
1396    CPU_DoubleU u;
1397
1398    u.d = int32_to_float64(val, &env->vec_status);
1399
1400    return u.ll;
1401}
1402
1403uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
1404{
1405    CPU_DoubleU u;
1406
1407    u.d = int64_to_float64(val, &env->vec_status);
1408
1409    return u.ll;
1410}
1411
1412uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
1413{
1414    CPU_DoubleU u;
1415
1416    u.d = uint32_to_float64(val, &env->vec_status);
1417
1418    return u.ll;
1419}
1420
1421uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
1422{
1423    CPU_DoubleU u;
1424
1425    u.d = uint64_to_float64(val, &env->vec_status);
1426
1427    return u.ll;
1428}
1429
1430uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
1431{
1432    CPU_DoubleU u;
1433
1434    u.ll = val;
1435    /* NaNs are not treated the same way IEEE 754 does */
1436    if (unlikely(float64_is_any_nan(u.d))) {
1437        return 0;
1438    }
1439
1440    return float64_to_int32(u.d, &env->vec_status);
1441}
1442
1443uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
1444{
1445    CPU_DoubleU u;
1446
1447    u.ll = val;
1448    /* NaNs are not treated the same way IEEE 754 does */
1449    if (unlikely(float64_is_any_nan(u.d))) {
1450        return 0;
1451    }
1452
1453    return float64_to_uint32(u.d, &env->vec_status);
1454}
1455
1456uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
1457{
1458    CPU_DoubleU u;
1459
1460    u.ll = val;
1461    /* NaNs are not treated the same way IEEE 754 does */
1462    if (unlikely(float64_is_any_nan(u.d))) {
1463        return 0;
1464    }
1465
1466    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
1467}
1468
1469uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
1470{
1471    CPU_DoubleU u;
1472
1473    u.ll = val;
1474    /* NaNs are not treated the same way IEEE 754 does */
1475    if (unlikely(float64_is_any_nan(u.d))) {
1476        return 0;
1477    }
1478
1479    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
1480}
1481
1482uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
1483{
1484    CPU_DoubleU u;
1485
1486    u.ll = val;
1487    /* NaNs are not treated the same way IEEE 754 does */
1488    if (unlikely(float64_is_any_nan(u.d))) {
1489        return 0;
1490    }
1491
1492    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
1493}
1494
1495uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
1496{
1497    CPU_DoubleU u;
1498
1499    u.ll = val;
1500    /* NaNs are not treated the same way IEEE 754 does */
1501    if (unlikely(float64_is_any_nan(u.d))) {
1502        return 0;
1503    }
1504
1505    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
1506}
1507
1508uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
1509{
1510    CPU_DoubleU u;
1511    float64 tmp;
1512
1513    u.d = int32_to_float64(val, &env->vec_status);
1514    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
1515    u.d = float64_div(u.d, tmp, &env->vec_status);
1516
1517    return u.ll;
1518}
1519
1520uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
1521{
1522    CPU_DoubleU u;
1523    float64 tmp;
1524
1525    u.d = uint32_to_float64(val, &env->vec_status);
1526    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
1527    u.d = float64_div(u.d, tmp, &env->vec_status);
1528
1529    return u.ll;
1530}
1531
1532uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
1533{
1534    CPU_DoubleU u;
1535    float64 tmp;
1536
1537    u.ll = val;
1538    /* NaNs are not treated the same way IEEE 754 does */
1539    if (unlikely(float64_is_any_nan(u.d))) {
1540        return 0;
1541    }
1542    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
1543    u.d = float64_mul(u.d, tmp, &env->vec_status);
1544
1545    return float64_to_int32(u.d, &env->vec_status);
1546}
1547
1548uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
1549{
1550    CPU_DoubleU u;
1551    float64 tmp;
1552
1553    u.ll = val;
1554    /* NaNs are not treated the same way IEEE 754 does */
1555    if (unlikely(float64_is_any_nan(u.d))) {
1556        return 0;
1557    }
1558    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
1559    u.d = float64_mul(u.d, tmp, &env->vec_status);
1560
1561    return float64_to_uint32(u.d, &env->vec_status);
1562}
1563
1564uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
1565{
1566    CPU_DoubleU u1;
1567    CPU_FloatU u2;
1568
1569    u1.ll = val;
1570    u2.f = float64_to_float32(u1.d, &env->vec_status);
1571
1572    return u2.l;
1573}
1574
1575uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
1576{
1577    CPU_DoubleU u2;
1578    CPU_FloatU u1;
1579
1580    u1.l = val;
1581    u2.d = float32_to_float64(u1.f, &env->vec_status);
1582
1583    return u2.ll;
1584}
1585
1586/* Double-precision floating-point arithmetic */
1587uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
1588{
1589    CPU_DoubleU u1, u2;
1590
1591    u1.ll = op1;
1592    u2.ll = op2;
1593    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
1594    return u1.ll;
1595}
1596
1597uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
1598{
1599    CPU_DoubleU u1, u2;
1600
1601    u1.ll = op1;
1602    u2.ll = op2;
1603    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
1604    return u1.ll;
1605}
1606
1607uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
1608{
1609    CPU_DoubleU u1, u2;
1610
1611    u1.ll = op1;
1612    u2.ll = op2;
1613    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
1614    return u1.ll;
1615}
1616
1617uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
1618{
1619    CPU_DoubleU u1, u2;
1620
1621    u1.ll = op1;
1622    u2.ll = op2;
1623    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
1624    return u1.ll;
1625}
1626
1627/* Double precision floating point helpers */
1628uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1629{
1630    CPU_DoubleU u1, u2;
1631
1632    u1.ll = op1;
1633    u2.ll = op2;
1634    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
1635}
1636
1637uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1638{
1639    CPU_DoubleU u1, u2;
1640
1641    u1.ll = op1;
1642    u2.ll = op2;
1643    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
1644}
1645
1646uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
1647{
1648    CPU_DoubleU u1, u2;
1649
1650    u1.ll = op1;
1651    u2.ll = op2;
1652    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
1653}
1654
1655uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1656{
1657    /* XXX: TODO: test special values (NaN, infinities, ...) */
1658    return helper_efdtstlt(env, op1, op2);
1659}
1660
1661uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1662{
1663    /* XXX: TODO: test special values (NaN, infinities, ...) */
1664    return helper_efdtstgt(env, op1, op2);
1665}
1666
1667uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
1668{
1669    /* XXX: TODO: test special values (NaN, infinities, ...) */
1670    return helper_efdtsteq(env, op1, op2);
1671}
1672
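    /*
     * Identity conversion, so that the type-generic VSX macros below can
     * expand to a float64-to-float64 "conversion" without a function call.
     */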
1673#define float64_to_float64(x, env) x
1674
1675
1676/* VSX_ADD_SUB - VSX floating point add/subtract
1677 *   name  - instruction mnemonic
1678 *   op    - operation (add or sub)
1679 *   nels  - number of elements (1, 2 or 4)
1680 *   tp    - type (float32 or float64)
1681 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1682 *   sfprf - set FPRF
     *   r2sp  - round intermediate result to single precision
1683 */
1684#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp)                    \
1685void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
1686{                                                                            \
1687    ppc_vsr_t xt, xa, xb;                                                    \
1688    int i;                                                                   \
1689                                                                             \
1690    getVSR(xA(opcode), &xa, env);                                            \
1691    getVSR(xB(opcode), &xb, env);                                            \
1692    getVSR(xT(opcode), &xt, env);                                            \
1693    helper_reset_fpstatus(env);                                              \
1694                                                                             \
1695    for (i = 0; i < nels; i++) {                                             \
1696        float_status tstat = env->fp_status;                                 \
1697        set_float_exception_flags(0, &tstat);                                \
1698        xt.fld = tp##_##op(xa.fld, xb.fld, &tstat);                          \
1699        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
1700                                                                             \
1701        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
1702            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {      \
1703                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf);    \
1704            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
1705                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
1706                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
1707            }                                                                \
1708        }                                                                    \
1709                                                                             \
1710        if (r2sp) {                                                          \
1711            xt.fld = helper_frsp(env, xt.fld);                               \
1712        }                                                                    \
1713                                                                             \
1714        if (sfprf) {                                                         \
1715            helper_compute_fprf_float64(env, xt.fld);                        \
1716        }                                                                    \
1717    }                                                                        \
1718    putVSR(xT(opcode), &xt, env);                                            \
1719    float_check_status(env);                                                 \
1720}
1721
1722VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
1723VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
1724VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
1725VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
1726VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
1727VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
1728VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
1729VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
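    /*
     * As an illustration, VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
     * above expands to roughly:
     *
     *     void helper_xsaddsp(CPUPPCState *env, uint32_t opcode)
     *     {
     *         ...
     *         xt.VsrD(0) = float64_add(xa.VsrD(0), xb.VsrD(0), &tstat);
     *         ...
     *         xt.VsrD(0) = helper_frsp(env, xt.VsrD(0));      // r2sp == 1
     *         helper_compute_fprf_float64(env, xt.VsrD(0));   // sfprf == 1
     *         ...
     *     }
     */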
1730
1731void helper_xsaddqp(CPUPPCState *env, uint32_t opcode)
1732{
1733    ppc_vsr_t xt, xa, xb;
1734    float_status tstat;
1735
1736    getVSR(rA(opcode) + 32, &xa, env);
1737    getVSR(rB(opcode) + 32, &xb, env);
1738    getVSR(rD(opcode) + 32, &xt, env);
1739    helper_reset_fpstatus(env);
1740
1741    tstat = env->fp_status;
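        /* The round-to-odd form (xsaddqpo) is selected by the opcode's RO
         * bit, which sits in the Rc position. */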
1742    if (unlikely(Rc(opcode) != 0)) {
1743        tstat.float_rounding_mode = float_round_to_odd;
1744    }
1745
1746    set_float_exception_flags(0, &tstat);
1747    xt.f128 = float128_add(xa.f128, xb.f128, &tstat);
1748    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
1749
1750    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
1751        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
1752            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
1753        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
1754                   float128_is_signaling_nan(xb.f128, &tstat)) {
1755            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
1756        }
1757    }
1758
1759    helper_compute_fprf_float128(env, xt.f128);
1760
1761    putVSR(rD(opcode) + 32, &xt, env);
1762    float_check_status(env);
1763}
1764
1765/* VSX_MUL - VSX floating point multiply
1766 *   op    - instruction mnemonic
1767 *   nels  - number of elements (1, 2 or 4)
1768 *   tp    - type (float32 or float64)
1769 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1770 *   sfprf - set FPRF
     *   r2sp  - round intermediate result to single precision
1771 */
1772#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp)                              \
1773void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
1774{                                                                            \
1775    ppc_vsr_t xt, xa, xb;                                                    \
1776    int i;                                                                   \
1777                                                                             \
1778    getVSR(xA(opcode), &xa, env);                                            \
1779    getVSR(xB(opcode), &xb, env);                                            \
1780    getVSR(xT(opcode), &xt, env);                                            \
1781    helper_reset_fpstatus(env);                                              \
1782                                                                             \
1783    for (i = 0; i < nels; i++) {                                             \
1784        float_status tstat = env->fp_status;                                 \
1785        set_float_exception_flags(0, &tstat);                                \
1786        xt.fld = tp##_mul(xa.fld, xb.fld, &tstat);                           \
1787        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
1788                                                                             \
1789        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
1790            if ((tp##_is_infinity(xa.fld) && tp##_is_zero(xb.fld)) ||        \
1791                (tp##_is_infinity(xb.fld) && tp##_is_zero(xa.fld))) {        \
1792                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, sfprf);    \
1793            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
1794                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
1795                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
1796            }                                                                \
1797        }                                                                    \
1798                                                                             \
1799        if (r2sp) {                                                          \
1800            xt.fld = helper_frsp(env, xt.fld);                               \
1801        }                                                                    \
1802                                                                             \
1803        if (sfprf) {                                                         \
1804            helper_compute_fprf_float64(env, xt.fld);                        \
1805        }                                                                    \
1806    }                                                                        \
1807                                                                             \
1808    putVSR(xT(opcode), &xt, env);                                            \
1809    float_check_status(env);                                                 \
1810}
1811
1812VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
1813VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
1814VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
1815VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
1816
1817void helper_xsmulqp(CPUPPCState *env, uint32_t opcode)
1818{
1819    ppc_vsr_t xt, xa, xb;
1820    float_status tstat;
1821
1822    getVSR(rA(opcode) + 32, &xa, env);
1823    getVSR(rB(opcode) + 32, &xb, env);
1824    getVSR(rD(opcode) + 32, &xt, env);
1825
1826    helper_reset_fpstatus(env);
1827    tstat = env->fp_status;
1828    if (unlikely(Rc(opcode) != 0)) {
1829        tstat.float_rounding_mode = float_round_to_odd;
1830    }
1831
1832    set_float_exception_flags(0, &tstat);
1833    xt.f128 = float128_mul(xa.f128, xb.f128, &tstat);
1834    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
1835
1836    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
1837        if ((float128_is_infinity(xa.f128) && float128_is_zero(xb.f128)) ||
1838            (float128_is_infinity(xb.f128) && float128_is_zero(xa.f128))) {
1839            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
1840        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
1841                   float128_is_signaling_nan(xb.f128, &tstat)) {
1842            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
1843        }
1844    }
1845    helper_compute_fprf_float128(env, xt.f128);
1846
1847    putVSR(rD(opcode) + 32, &xt, env);
1848    float_check_status(env);
1849}
1850
1851/* VSX_DIV - VSX floating point divide
1852 *   op    - instruction mnemonic
1853 *   nels  - number of elements (1, 2 or 4)
1854 *   tp    - type (float32 or float64)
1855 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1856 *   sfprf - set FPRF
     *   r2sp  - round intermediate result to single precision
1857 */
1858#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp)                               \
1859void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
1860{                                                                             \
1861    ppc_vsr_t xt, xa, xb;                                                     \
1862    int i;                                                                    \
1863                                                                              \
1864    getVSR(xA(opcode), &xa, env);                                             \
1865    getVSR(xB(opcode), &xb, env);                                             \
1866    getVSR(xT(opcode), &xt, env);                                             \
1867    helper_reset_fpstatus(env);                                               \
1868                                                                              \
1869    for (i = 0; i < nels; i++) {                                              \
1870        float_status tstat = env->fp_status;                                  \
1871        set_float_exception_flags(0, &tstat);                                 \
1872        xt.fld = tp##_div(xa.fld, xb.fld, &tstat);                            \
1873        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
1874                                                                              \
1875        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
1876            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {       \
1877                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, sfprf);     \
1878            } else if (tp##_is_zero(xa.fld) &&                                \
1879                tp##_is_zero(xb.fld)) {                                       \
1880                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, sfprf);     \
1881            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||               \
1882                tp##_is_signaling_nan(xb.fld, &tstat)) {                      \
1883                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);    \
1884            }                                                                 \
1885        }                                                                     \
1886                                                                              \
1887        if (r2sp) {                                                           \
1888            xt.fld = helper_frsp(env, xt.fld);                                \
1889        }                                                                     \
1890                                                                              \
1891        if (sfprf) {                                                          \
1892            helper_compute_fprf_float64(env, xt.fld);                         \
1893        }                                                                     \
1894    }                                                                         \
1895                                                                              \
1896    putVSR(xT(opcode), &xt, env);                                             \
1897    float_check_status(env);                                                  \
1898}
1899
1900VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
1901VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
1902VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
1903VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
1904
1905void helper_xsdivqp(CPUPPCState *env, uint32_t opcode)
1906{
1907    ppc_vsr_t xt, xa, xb;
1908    float_status tstat;
1909
1910    getVSR(rA(opcode) + 32, &xa, env);
1911    getVSR(rB(opcode) + 32, &xb, env);
1912    getVSR(rD(opcode) + 32, &xt, env);
1913
1914    helper_reset_fpstatus(env);
1915    tstat = env->fp_status;
1916    if (unlikely(Rc(opcode) != 0)) {
1917        tstat.float_rounding_mode = float_round_to_odd;
1918    }
1919
1920    set_float_exception_flags(0, &tstat);
1921    xt.f128 = float128_div(xa.f128, xb.f128, &tstat);
1922    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
1923
1924    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
1925        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
1926            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
1927        } else if (float128_is_zero(xa.f128) &&
1928            float128_is_zero(xb.f128)) {
1929            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
1930        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
1931            float128_is_signaling_nan(xb.f128, &tstat)) {
1932            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
1933        }
1934    }
1935
1936    helper_compute_fprf_float128(env, xt.f128);
1937    putVSR(rD(opcode) + 32, &xt, env);
1938    float_check_status(env);
1939}
1940
1941/* VSX_RE  - VSX floating point reciprocal estimate
1942 *   op    - instruction mnemonic
1943 *   nels  - number of elements (1, 2 or 4)
1944 *   tp    - type (float32 or float64)
1945 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1946 *   sfprf - set FPRF
     *   r2sp  - round intermediate result to single precision
1947 */
1948#define VSX_RE(op, nels, tp, fld, sfprf, r2sp)                                \
1949void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
1950{                                                                             \
1951    ppc_vsr_t xt, xb;                                                         \
1952    int i;                                                                    \
1953                                                                              \
1954    getVSR(xB(opcode), &xb, env);                                             \
1955    getVSR(xT(opcode), &xt, env);                                             \
1956    helper_reset_fpstatus(env);                                               \
1957                                                                              \
1958    for (i = 0; i < nels; i++) {                                              \
1959        if (unlikely(tp##_is_signaling_nan(xb.fld, &env->fp_status))) {       \
1960            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);        \
1961        }                                                                     \
1962        xt.fld = tp##_div(tp##_one, xb.fld, &env->fp_status);                 \
1963                                                                              \
1964        if (r2sp) {                                                           \
1965            xt.fld = helper_frsp(env, xt.fld);                                \
1966        }                                                                     \
1967                                                                              \
1968        if (sfprf) {                                                          \
1969            helper_compute_fprf_float64(env, xt.fld);                         \
1970        }                                                                     \
1971    }                                                                         \
1972                                                                              \
1973    putVSR(xT(opcode), &xt, env);                                             \
1974    float_check_status(env);                                                  \
1975}
1976
1977VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
1978VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
1979VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
1980VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
1981
1982/* VSX_SQRT - VSX floating point square root
1983 *   op    - instruction mnemonic
1984 *   nels  - number of elements (1, 2 or 4)
1985 *   tp    - type (float32 or float64)
1986 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1987 *   sfprf - set FPRF
     *   r2sp  - round intermediate result to single precision
1988 */
1989#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp)                             \
1990void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
1991{                                                                            \
1992    ppc_vsr_t xt, xb;                                                        \
1993    int i;                                                                   \
1994                                                                             \
1995    getVSR(xB(opcode), &xb, env);                                            \
1996    getVSR(xT(opcode), &xt, env);                                            \
1997    helper_reset_fpstatus(env);                                              \
1998                                                                             \
1999    for (i = 0; i < nels; i++) {                                             \
2000        float_status tstat = env->fp_status;                                 \
2001        set_float_exception_flags(0, &tstat);                                \
2002        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
2003        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
2004                                                                             \
2005        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
2006            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
2007                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
2008            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
2009                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
2010            }                                                                \
2011        }                                                                    \
2012                                                                             \
2013        if (r2sp) {                                                          \
2014            xt.fld = helper_frsp(env, xt.fld);                               \
2015        }                                                                    \
2016                                                                             \
2017        if (sfprf) {                                                         \
2018            helper_compute_fprf_float64(env, xt.fld);                        \
2019        }                                                                    \
2020    }                                                                        \
2021                                                                             \
2022    putVSR(xT(opcode), &xt, env);                                            \
2023    float_check_status(env);                                                 \
2024}
2025
2026VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
2027VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
2028VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
2029VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
2030
2031/* VSX_RSQRTE - VSX floating point reciprocal square root estimate
2032 *   op    - instruction mnemonic
2033 *   nels  - number of elements (1, 2 or 4)
2034 *   tp    - type (float32 or float64)
2035 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2036 *   sfprf - set FPRF
     *   r2sp  - round intermediate result to single precision
2037 */
2038#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp)                           \
2039void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
2040{                                                                            \
2041    ppc_vsr_t xt, xb;                                                        \
2042    int i;                                                                   \
2043                                                                             \
2044    getVSR(xB(opcode), &xb, env);                                            \
2045    getVSR(xT(opcode), &xt, env);                                            \
2046    helper_reset_fpstatus(env);                                              \
2047                                                                             \
2048    for (i = 0; i < nels; i++) {                                             \
2049        float_status tstat = env->fp_status;                                 \
2050        set_float_exception_flags(0, &tstat);                                \
2051        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
2052        xt.fld = tp##_div(tp##_one, xt.fld, &tstat);                         \
2053        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
2054                                                                             \
2055        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
2056            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
2057                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
2058            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
2059                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
2060            }                                                                \
2061        }                                                                    \
2062                                                                             \
2063        if (r2sp) {                                                          \
2064            xt.fld = helper_frsp(env, xt.fld);                               \
2065        }                                                                    \
2066                                                                             \
2067        if (sfprf) {                                                         \
2068            helper_compute_fprf_float64(env, xt.fld);                        \
2069        }                                                                    \
2070    }                                                                        \
2071                                                                             \
2072    putVSR(xT(opcode), &xt, env);                                            \
2073    float_check_status(env);                                                 \
2074}
2075
2076VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
2077VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
2078VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
2079VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
2080
2081/* VSX_TDIV - VSX floating point test for divide
2082 *   op    - instruction mnemonic
2083 *   nels  - number of elements (1, 2 or 4)
2084 *   tp    - type (float32 or float64)
2085 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2086 *   emin  - minimum unbiased exponent
2087 *   emax  - maximum unbiased exponent
2088 *   nbits - number of fraction bits
2089 */
2090#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                  \
2091void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
2092{                                                                       \
2093    ppc_vsr_t xa, xb;                                                   \
2094    int i;                                                              \
2095    int fe_flag = 0;                                                    \
2096    int fg_flag = 0;                                                    \
2097                                                                        \
2098    getVSR(xA(opcode), &xa, env);                                       \
2099    getVSR(xB(opcode), &xb, env);                                       \
2100                                                                        \
2101    for (i = 0; i < nels; i++) {                                        \
2102        if (unlikely(tp##_is_infinity(xa.fld) ||                        \
2103                     tp##_is_infinity(xb.fld) ||                        \
2104                     tp##_is_zero(xb.fld))) {                           \
2105            fe_flag = 1;                                                \
2106            fg_flag = 1;                                                \
2107        } else {                                                        \
2108            int e_a = ppc_##tp##_get_unbiased_exp(xa.fld);              \
2109            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);              \
2110                                                                        \
2111            if (unlikely(tp##_is_any_nan(xa.fld) ||                     \
2112                         tp##_is_any_nan(xb.fld))) {                    \
2113                fe_flag = 1;                                            \
2114            } else if ((e_b <= emin) || (e_b >= (emax-2))) {            \
2115                fe_flag = 1;                                            \
2116            } else if (!tp##_is_zero(xa.fld) &&                         \
2117                       (((e_a - e_b) >= emax) ||                        \
2118                        ((e_a - e_b) <= (emin+1)) ||                    \
2119                         (e_a <= (emin+nbits)))) {                      \
2120                fe_flag = 1;                                            \
2121            }                                                           \
2122                                                                        \
2123            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {           \
2124                /* XB is not zero because of the above check and */     \
2125                /* so must be denormalized.                      */     \
2126                fg_flag = 1;                                            \
2127            }                                                           \
2128        }                                                               \
2129    }                                                                   \
2130                                                                        \
2131    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
2132}
2133
2134VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
2135VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
2136VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
2137
2138/* VSX_TSQRT - VSX floating point test for square root
2139 *   op    - instruction mnemonic
2140 *   nels  - number of elements (1, 2 or 4)
2141 *   tp    - type (float32 or float64)
2142 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2143 *   emin  - minimum unbiased exponent
2145 *   nbits - number of fraction bits
2146 */
2147#define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                       \
2148void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
2149{                                                                       \
2150    ppc_vsr_t xa, xb;                                                   \
2151    int i;                                                              \
2152    int fe_flag = 0;                                                    \
2153    int fg_flag = 0;                                                    \
2154                                                                        \
2155    getVSR(xA(opcode), &xa, env);                                       \
2156    getVSR(xB(opcode), &xb, env);                                       \
2157                                                                        \
2158    for (i = 0; i < nels; i++) {                                        \
2159        if (unlikely(tp##_is_infinity(xb.fld) ||                        \
2160                     tp##_is_zero(xb.fld))) {                           \
2161            fe_flag = 1;                                                \
2162            fg_flag = 1;                                                \
2163        } else {                                                        \
2164            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);              \
2165                                                                        \
2166            if (unlikely(tp##_is_any_nan(xb.fld))) {                    \
2167                fe_flag = 1;                                            \
2168            } else if (unlikely(tp##_is_zero(xb.fld))) {                \
2169                fe_flag = 1;                                            \
2170            } else if (unlikely(tp##_is_neg(xb.fld))) {                 \
2171                fe_flag = 1;                                            \
2172            } else if (!tp##_is_zero(xb.fld) &&                         \
2173                      (e_b <= (emin+nbits))) {                          \
2174                fe_flag = 1;                                            \
2175            }                                                           \
2176                                                                        \
2177            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {           \
2178                /* XB is not zero because of the above check and */     \
2179                /* therefore must be denormalized.               */     \
2180                fg_flag = 1;                                            \
2181            }                                                           \
2182        }                                                               \
2183    }                                                                   \
2184                                                                        \
2185    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
2186}
2187
2188VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
2189VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
2190VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
2191
2192/* VSX_MADD - VSX floating point multiply/add variations
2193 *   op    - instruction mnemonic
2194 *   nels  - number of elements (1, 2 or 4)
2195 *   tp    - type (float32 or float64)
2196 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2197 *   maddflgs - flags for the float*muladd routine that control the
2198 *           various forms (madd, msub, nmadd, nmsub)
2199 *   afrm  - A form (1=A, 0=M)
2200 *   sfprf - set FPRF
     *   r2sp  - round intermediate result to single precision
2201 */
2202#define VSX_MADD(op, nels, tp, fld, maddflgs, afrm, sfprf, r2sp)              \
2203void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
2204{                                                                             \
2205    ppc_vsr_t xt_in, xa, xb, xt_out;                                          \
2206    ppc_vsr_t *b, *c;                                                         \
2207    int i;                                                                    \
2208                                                                              \
2209    if (afrm) { /* AxB + T */                                                 \
2210        b = &xb;                                                              \
2211        c = &xt_in;                                                           \
2212    } else { /* AxT + B */                                                    \
2213        b = &xt_in;                                                           \
2214        c = &xb;                                                              \
2215    }                                                                         \
2216                                                                              \
2217    getVSR(xA(opcode), &xa, env);                                             \
2218    getVSR(xB(opcode), &xb, env);                                             \
2219    getVSR(xT(opcode), &xt_in, env);                                          \
2220                                                                              \
2221    xt_out = xt_in;                                                           \
2222                                                                              \
2223    helper_reset_fpstatus(env);                                               \
2224                                                                              \
2225    for (i = 0; i < nels; i++) {                                              \
2226        float_status tstat = env->fp_status;                                  \
2227        set_float_exception_flags(0, &tstat);                                 \
2228        if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
2229            /* Avoid double rounding errors by rounding the intermediate */   \
2230            /* result to odd.                                            */   \
2231            set_float_rounding_mode(float_round_to_zero, &tstat);             \
2232            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                  \
2233                                       maddflgs, &tstat);                     \
2234            xt_out.fld |= (get_float_exception_flags(&tstat) &                \
2235                              float_flag_inexact) != 0;                       \
2236        } else {                                                              \
2237            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                  \
2238                                        maddflgs, &tstat);                    \
2239        }                                                                     \
2240        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
2241                                                                              \
2242        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
2243            tp##_maddsub_update_excp(env, xa.fld, b->fld, c->fld, maddflgs);  \
2244        }                                                                     \
2245                                                                              \
2246        if (r2sp) {                                                           \
2247            xt_out.fld = helper_frsp(env, xt_out.fld);                        \
2248        }                                                                     \
2249                                                                              \
2250        if (sfprf) {                                                          \
2251            helper_compute_fprf_float64(env, xt_out.fld);                     \
2252        }                                                                     \
2253    }                                                                         \
2254    putVSR(xT(opcode), &xt_out, env);                                         \
2255    float_check_status(env);                                                  \
2256}
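    /*
     * Note on the r2sp path above: truncating toward zero and then ORing the
     * inexact (sticky) flag into the low bit of the fraction emulates
     * round-to-odd for the intermediate double-precision result, so the
     * subsequent rounding to single precision cannot double-round.
     */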
2257
2258VSX_MADD(xsmaddadp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 0)
2259VSX_MADD(xsmaddmdp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 0)
2260VSX_MADD(xsmsubadp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 0)
2261VSX_MADD(xsmsubmdp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 0)
2262VSX_MADD(xsnmaddadp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 0)
2263VSX_MADD(xsnmaddmdp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 0)
2264VSX_MADD(xsnmsubadp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 0)
2265VSX_MADD(xsnmsubmdp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 0)
2266
2267VSX_MADD(xsmaddasp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 1)
2268VSX_MADD(xsmaddmsp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 1)
2269VSX_MADD(xsmsubasp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 1)
2270VSX_MADD(xsmsubmsp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 1)
2271VSX_MADD(xsnmaddasp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 1)
2272VSX_MADD(xsnmaddmsp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 1)
2273VSX_MADD(xsnmsubasp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 1)
2274VSX_MADD(xsnmsubmsp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 1)
2275
2276VSX_MADD(xvmaddadp, 2, float64, VsrD(i), MADD_FLGS, 1, 0, 0)
2277VSX_MADD(xvmaddmdp, 2, float64, VsrD(i), MADD_FLGS, 0, 0, 0)
2278VSX_MADD(xvmsubadp, 2, float64, VsrD(i), MSUB_FLGS, 1, 0, 0)
2279VSX_MADD(xvmsubmdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0, 0)
2280VSX_MADD(xvnmaddadp, 2, float64, VsrD(i), NMADD_FLGS, 1, 0, 0)
2281VSX_MADD(xvnmaddmdp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0, 0)
2282VSX_MADD(xvnmsubadp, 2, float64, VsrD(i), NMSUB_FLGS, 1, 0, 0)
2283VSX_MADD(xvnmsubmdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0, 0)
2284
2285VSX_MADD(xvmaddasp, 4, float32, VsrW(i), MADD_FLGS, 1, 0, 0)
2286VSX_MADD(xvmaddmsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0, 0)
2287VSX_MADD(xvmsubasp, 4, float32, VsrW(i), MSUB_FLGS, 1, 0, 0)
2288VSX_MADD(xvmsubmsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0, 0)
2289VSX_MADD(xvnmaddasp, 4, float32, VsrW(i), NMADD_FLGS, 1, 0, 0)
2290VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
2291VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
2292VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
2293
2294/* VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
2295 *   op    - instruction mnemonic
2296 *   cmp   - comparison operation
2297 *   exp   - expected result of comparison
2298 *   svxvc - set VXVC bit
2299 */
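    /*
     * Note: the operands below are passed to float64_<cmp> as (xb, xa), so
     * "lt" and "le" with exp == 1 implement the > and >= comparisons of
     * xa against xb.
     */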
2300#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc)                                \
2301void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
2302{                                                                             \
2303    ppc_vsr_t xt, xa, xb;                                                     \
2304    bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false;            \
2305                                                                              \
2306    getVSR(xA(opcode), &xa, env);                                             \
2307    getVSR(xB(opcode), &xb, env);                                             \
2308    getVSR(xT(opcode), &xt, env);                                             \
2309                                                                              \
2310    if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||              \
2311        float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {              \
2312        vxsnan_flag = true;                                                   \
2313        if (fpscr_ve == 0 && svxvc) {                                         \
2314            vxvc_flag = true;                                                 \
2315        }                                                                     \
2316    } else if (svxvc) {                                                       \
2317        vxvc_flag = float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) ||      \
2318            float64_is_quiet_nan(xb.VsrD(0), &env->fp_status);                \
2319    }                                                                         \
2320    if (vxsnan_flag) {                                                        \
2321        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);                \
2322    }                                                                         \
2323    if (vxvc_flag) {                                                          \
2324        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);                  \
2325    }                                                                         \
2326    vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag);                        \
2327                                                                              \
2328    if (!vex_flag) {                                                          \
2329        if (float64_##cmp(xb.VsrD(0), xa.VsrD(0), &env->fp_status) == exp) {  \
2330            xt.VsrD(0) = -1;                                                  \
2331            xt.VsrD(1) = 0;                                                   \
2332        } else {                                                              \
2333            xt.VsrD(0) = 0;                                                   \
2334            xt.VsrD(1) = 0;                                                   \
2335        }                                                                     \
2336    }                                                                         \
2337    putVSR(xT(opcode), &xt, env);                                             \
2338    helper_float_check_status(env);                                           \
2339}
2340
2341VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
2342VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
2343VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
2344VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)
2345
2346void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode)
2347{
2348    ppc_vsr_t xa, xb;
2349    int64_t exp_a, exp_b;
2350    uint32_t cc;
2351
2352    getVSR(xA(opcode), &xa, env);
2353    getVSR(xB(opcode), &xb, env);
2354
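        /* Compare the raw biased exponent fields; any NaN operand forces the
         * "unordered" (SO) result instead. */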
2355    exp_a = extract64(xa.VsrD(0), 52, 11);
2356    exp_b = extract64(xb.VsrD(0), 52, 11);
2357
2358    if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||
2359                 float64_is_any_nan(xb.VsrD(0)))) {
2360        cc = CRF_SO;
2361    } else {
2362        if (exp_a < exp_b) {
2363            cc = CRF_LT;
2364        } else if (exp_a > exp_b) {
2365            cc = CRF_GT;
2366        } else {
2367            cc = CRF_EQ;
2368        }
2369    }
2370
2371    env->fpscr &= ~(0x0F << FPSCR_FPRF);
2372    env->fpscr |= cc << FPSCR_FPRF;
2373    env->crf[BF(opcode)] = cc;
2374
2375    helper_float_check_status(env);
2376}
2377
2378void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode)
2379{
2380    ppc_vsr_t xa, xb;
2381    int64_t exp_a, exp_b;
2382    uint32_t cc;
2383
2384    getVSR(rA(opcode) + 32, &xa, env);
2385    getVSR(rB(opcode) + 32, &xb, env);
2386
2387    exp_a = extract64(xa.VsrD(0), 48, 15);
2388    exp_b = extract64(xb.VsrD(0), 48, 15);
2389
2390    if (unlikely(float128_is_any_nan(xa.f128) ||
2391                 float128_is_any_nan(xb.f128))) {
2392        cc = CRF_SO;
2393    } else {
2394        if (exp_a < exp_b) {
2395            cc = CRF_LT;
2396        } else if (exp_a > exp_b) {
2397            cc = CRF_GT;
2398        } else {
2399            cc = CRF_EQ;
2400        }
2401    }
2402
2403    env->fpscr &= ~(0x0F << FPSCR_FPRF);
2404    env->fpscr |= cc << FPSCR_FPRF;
2405    env->crf[BF(opcode)] = cc;
2406
2407    helper_float_check_status(env);
2408}
2409
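    /* VSX_SCALAR_CMP - VSX scalar floating point compare (xscmpodp/xscmpudp)
     *   op      - instruction mnemonic
     *   ordered - ordered compare: quiet NaN operands also set VXVC
     */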
2410#define VSX_SCALAR_CMP(op, ordered)                                      \
2411void helper_##op(CPUPPCState *env, uint32_t opcode)                      \
2412{                                                                        \
2413    ppc_vsr_t xa, xb;                                                    \
2414    uint32_t cc = 0;                                                     \
2415    bool vxsnan_flag = false, vxvc_flag = false;                         \
2416                                                                         \
2417    helper_reset_fpstatus(env);                                          \
2418    getVSR(xA(opcode), &xa, env);                                        \
2419    getVSR(xB(opcode), &xb, env);                                        \
2420                                                                         \
2421    if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||         \
2422        float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {         \
2423        vxsnan_flag = true;                                              \
2424        cc = CRF_SO;                                                     \
2425        if (fpscr_ve == 0 && ordered) {                                  \
2426            vxvc_flag = true;                                            \
2427        }                                                                \
2428    } else if (float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) ||      \
2429               float64_is_quiet_nan(xb.VsrD(0), &env->fp_status)) {      \
2430        cc = CRF_SO;                                                     \
2431        if (ordered) {                                                   \
2432            vxvc_flag = true;                                            \
2433        }                                                                \
2434    }                                                                    \
2435    if (vxsnan_flag) {                                                   \
2436        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
2437    }                                                                    \
2438    if (vxvc_flag) {                                                     \
2439        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);             \
2440    }                                                                    \
2441                                                                         \
2442    if (float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {           \
2443        cc |= CRF_LT;                                                    \
2444    } else if (!float64_le(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {   \
2445        cc |= CRF_GT;                                                    \
2446    } else {                                                             \
2447        cc |= CRF_EQ;                                                    \
2448    }                                                                    \
2449                                                                         \
2450    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                 \
2451    env->fpscr |= cc << FPSCR_FPRF;                                      \
2452    env->crf[BF(opcode)] = cc;                                           \
2453                                                                         \
2454    float_check_status(env);                                             \
2455}
2456
2457VSX_SCALAR_CMP(xscmpodp, 1)
2458VSX_SCALAR_CMP(xscmpudp, 0)
2459
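    /* VSX_SCALAR_CMPQ - quad-precision variant of VSX_SCALAR_CMP
     *   op      - instruction mnemonic
     *   ordered - ordered compare: quiet NaN operands also set VXVC
     */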
2460#define VSX_SCALAR_CMPQ(op, ordered)                                    \
2461void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
2462{                                                                       \
2463    ppc_vsr_t xa, xb;                                                   \
2464    uint32_t cc = 0;                                                    \
2465    bool vxsnan_flag = false, vxvc_flag = false;                        \
2466                                                                        \
2467    helper_reset_fpstatus(env);                                         \
2468    getVSR(rA(opcode) + 32, &xa, env);                                  \
2469    getVSR(rB(opcode) + 32, &xb, env);                                  \
2470                                                                        \
2471    if (float128_is_signaling_nan(xa.f128, &env->fp_status) ||          \
2472        float128_is_signaling_nan(xb.f128, &env->fp_status)) {          \
2473        vxsnan_flag = true;                                             \
2474        cc = CRF_SO;                                                    \
2475        if (fpscr_ve == 0 && ordered) {                                 \
2476            vxvc_flag = true;                                           \
2477        }                                                               \
2478    } else if (float128_is_quiet_nan(xa.f128, &env->fp_status) ||       \
2479               float128_is_quiet_nan(xb.f128, &env->fp_status)) {       \
2480        cc = CRF_SO;                                                    \
2481        if (ordered) {                                                  \
2482            vxvc_flag = true;                                           \
2483        }                                                               \
2484    }                                                                   \
2485    if (vxsnan_flag) {                                                  \
2486        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);          \
2487    }                                                                   \
2488    if (vxvc_flag) {                                                    \
2489        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);            \
2490    }                                                                   \
2491                                                                        \
2492    if (float128_lt(xa.f128, xb.f128, &env->fp_status)) {               \
2493        cc |= CRF_LT;                                                   \
2494    } else if (!float128_le(xa.f128, xb.f128, &env->fp_status)) {       \
2495        cc |= CRF_GT;                                                   \
2496    } else {                                                            \
2497        cc |= CRF_EQ;                                                   \
2498    }                                                                   \
2499                                                                        \
2500    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                \
2501    env->fpscr |= cc << FPSCR_FPRF;                                     \
2502    env->crf[BF(opcode)] = cc;                                          \
2503                                                                        \
2504    float_check_status(env);                                            \
2505}
2506
2507VSX_SCALAR_CMPQ(xscmpoqp, 1)
2508VSX_SCALAR_CMPQ(xscmpuqp, 0)
2509
2510/* VSX_MAX_MIN - VSX floating point maximum/minimum
2511 *   name  - instruction mnemonic
2512 *   op    - operation (max or min)
2513 *   nels  - number of elements (1, 2 or 4)
2514 *   tp    - type (float32 or float64)
2515 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2516 */
2517#define VSX_MAX_MIN(name, op, nels, tp, fld)                                  \
2518void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
2519{                                                                             \
2520    ppc_vsr_t xt, xa, xb;                                                     \
2521    int i;                                                                    \
2522                                                                              \
2523    getVSR(xA(opcode), &xa, env);                                             \
2524    getVSR(xB(opcode), &xb, env);                                             \
2525    getVSR(xT(opcode), &xt, env);                                             \
2526                                                                              \
2527    for (i = 0; i < nels; i++) {                                              \
2528        xt.fld = tp##_##op(xa.fld, xb.fld, &env->fp_status);                  \
2529        if (unlikely(tp##_is_signaling_nan(xa.fld, &env->fp_status) ||        \
2530                     tp##_is_signaling_nan(xb.fld, &env->fp_status))) {       \
2531            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);            \
2532        }                                                                     \
2533    }                                                                         \
2534                                                                              \
2535    putVSR(xT(opcode), &xt, env);                                             \
2536    float_check_status(env);                                                  \
2537}
2538
2539VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
2540VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
2541VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
2542VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
2543VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
2544VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
2545
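    /* VSX_MAX_MINC - VSX scalar maximum/minimum type-C (xsmaxcdp/xsmincdp):
     * if either operand is a NaN, the result is taken from xB
     *   name - instruction mnemonic
     *   max  - 1 for maximum, 0 for minimum
     */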
2546#define VSX_MAX_MINC(name, max)                                               \
2547void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
2548{                                                                             \
2549    ppc_vsr_t xt, xa, xb;                                                     \
2550    bool vxsnan_flag = false, vex_flag = false;                               \
2551                                                                              \
2552    getVSR(rA(opcode) + 32, &xa, env);                                        \
2553    getVSR(rB(opcode) + 32, &xb, env);                                        \
2554    getVSR(rD(opcode) + 32, &xt, env);                                        \
2555                                                                              \
2556    if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||                            \
2557                 float64_is_any_nan(xb.VsrD(0)))) {                           \
2558        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||          \
2559            float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {          \
2560            vxsnan_flag = true;                                               \
2561        }                                                                     \
2562        xt.VsrD(0) = xb.VsrD(0);                                              \
2563    } else if ((max &&                                                        \
2564               !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) ||       \
2565               (!max &&                                                       \
2566               float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) {        \
2567        xt.VsrD(0) = xa.VsrD(0);                                              \
2568    } else {                                                                  \
2569        xt.VsrD(0) = xb.VsrD(0);                                              \
2570    }                                                                         \
2571                                                                              \
2572    vex_flag = fpscr_ve & vxsnan_flag;                                        \
2573    if (vxsnan_flag) {                                                        \
2574        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);                \
2575    }                                                                         \
2576    if (!vex_flag) {                                                          \
2577        putVSR(rD(opcode) + 32, &xt, env);                                    \
2578    }                                                                         \
2579}
2580
2581VSX_MAX_MINC(xsmaxcdp, 1);
2582VSX_MAX_MINC(xsmincdp, 0);
2583
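/* VSX_MAX_MINJ - VSX floating point maximum/minimum, IEEE 754-2008 style
 * (xsmaxjdp, xsminjdp)
 *   name  - instruction mnemonic
 *   max   - non-zero for maximum, zero for minimum
 *
 * A NaN in xA is returned unchanged; otherwise a NaN in xB is returned.
 * Signaling NaNs raise VXSNAN and suppress the update when fpscr_ve is set.
 * When both operands are zero, max prefers +0 and min prefers -0.
 */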
2584#define VSX_MAX_MINJ(name, max)                                               \
2585void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
2586{                                                                             \
2587    ppc_vsr_t xt, xa, xb;                                                     \
2588    bool vxsnan_flag = false, vex_flag = false;                               \
2589                                                                              \
2590    getVSR(rA(opcode) + 32, &xa, env);                                        \
2591    getVSR(rB(opcode) + 32, &xb, env);                                        \
2592    getVSR(rD(opcode) + 32, &xt, env);                                        \
2593                                                                              \
2594    if (unlikely(float64_is_any_nan(xa.VsrD(0)))) {                           \
2595        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status)) {          \
2596            vxsnan_flag = true;                                               \
2597        }                                                                     \
2598        xt.VsrD(0) = xa.VsrD(0);                                              \
2599    } else if (unlikely(float64_is_any_nan(xb.VsrD(0)))) {                    \
2600        if (float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {          \
2601            vxsnan_flag = true;                                               \
2602        }                                                                     \
2603        xt.VsrD(0) = xb.VsrD(0);                                              \
2604    } else if (float64_is_zero(xa.VsrD(0)) && float64_is_zero(xb.VsrD(0))) {  \
2605        if (max) {                                                            \
2606            if (!float64_is_neg(xa.VsrD(0)) || !float64_is_neg(xb.VsrD(0))) { \
2607                xt.VsrD(0) = 0ULL;                                            \
2608            } else {                                                          \
2609                xt.VsrD(0) = 0x8000000000000000ULL;                           \
2610            }                                                                 \
2611        } else {                                                              \
2612            if (float64_is_neg(xa.VsrD(0)) || float64_is_neg(xb.VsrD(0))) {   \
2613                xt.VsrD(0) = 0x8000000000000000ULL;                           \
2614            } else {                                                          \
2615                xt.VsrD(0) = 0ULL;                                            \
2616            }                                                                 \
2617        }                                                                     \
2618    } else if ((max &&                                                        \
2619               !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) ||       \
2620               (!max &&                                                       \
2621               float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) {        \
2622        xt.VsrD(0) = xa.VsrD(0);                                              \
2623    } else {                                                                  \
2624        xt.VsrD(0) = xb.VsrD(0);                                              \
2625    }                                                                         \
2626                                                                              \
2627    vex_flag = fpscr_ve & vxsnan_flag;                                        \
2628    if (vxsnan_flag) {                                                        \
2629        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);                \
2630    }                                                                         \
2631    if (!vex_flag) {                                                          \
2632        putVSR(rD(opcode) + 32, &xt, env);                                    \
2633    }                                                                         \
2634}
2635
2636VSX_MAX_MINJ(xsmaxjdp, 1);
2637VSX_MAX_MINJ(xsminjdp, 0);
2638
2639/* VSX_CMP - VSX floating point compare
2640 *   op    - instruction mnemonic
2641 *   nels  - number of elements (1, 2 or 4)
2642 *   tp    - type (float32 or float64)
2643 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2644 *   cmp   - comparison operation
2645 *   svxvc - set VXVC bit
2646 *   exp   - expected result of comparison
2647 */
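/* The per-element test below computes tp##_##cmp(xB, xA) == exp, so
 * "greater than or equal" is expressed as le with the operands swapped
 * (exp = 1) and "not equal" as eq with exp = 0.  When the record bit of
 * the opcode is set, CR6 receives the all-true/all-false summary.
 */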
2648#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp)                       \
2649void helper_##op(CPUPPCState *env, uint32_t opcode)                       \
2650{                                                                         \
2651    ppc_vsr_t xt, xa, xb;                                                 \
2652    int i;                                                                \
2653    int all_true = 1;                                                     \
2654    int all_false = 1;                                                    \
2655                                                                          \
2656    getVSR(xA(opcode), &xa, env);                                         \
2657    getVSR(xB(opcode), &xb, env);                                         \
2658    getVSR(xT(opcode), &xt, env);                                         \
2659                                                                          \
2660    for (i = 0; i < nels; i++) {                                          \
2661        if (unlikely(tp##_is_any_nan(xa.fld) ||                           \
2662                     tp##_is_any_nan(xb.fld))) {                          \
2663            if (tp##_is_signaling_nan(xa.fld, &env->fp_status) ||         \
2664                tp##_is_signaling_nan(xb.fld, &env->fp_status)) {         \
2665                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);    \
2666            }                                                             \
2667            if (svxvc) {                                                  \
2668                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);      \
2669            }                                                             \
2670            xt.fld = 0;                                                   \
2671            all_true = 0;                                                 \
2672        } else {                                                          \
2673            if (tp##_##cmp(xb.fld, xa.fld, &env->fp_status) == exp) {     \
2674                xt.fld = -1;                                              \
2675                all_false = 0;                                            \
2676            } else {                                                      \
2677                xt.fld = 0;                                               \
2678                all_true = 0;                                             \
2679            }                                                             \
2680        }                                                                 \
2681    }                                                                     \
2682                                                                          \
2683    putVSR(xT(opcode), &xt, env);                                         \
2684    if ((opcode >> (31-21)) & 1) {                                        \
2685        env->crf[6] = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);       \
2686    }                                                                     \
2687    float_check_status(env);                                              \
2688}
2689
2690VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
2691VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
2692VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
2693VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
2694VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
2695VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
2696VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
2697VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
2698
2699/* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
2700 *   op    - instruction mnemonic
2701 *   nels  - number of elements (1, 2 or 4)
2702 *   stp   - source type (float32 or float64)
2703 *   ttp   - target type (float32 or float64)
2704 *   sfld  - source vsr_t field
2705 *   tfld  - target vsr_t field (VsrD(*) or VsrW(*))
2706 *   sfprf - set FPRF
2707 */
2708#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf)    \
2709void helper_##op(CPUPPCState *env, uint32_t opcode)                \
2710{                                                                  \
2711    ppc_vsr_t xt, xb;                                              \
2712    int i;                                                         \
2713                                                                   \
2714    getVSR(xB(opcode), &xb, env);                                  \
2715    getVSR(xT(opcode), &xt, env);                                  \
2716                                                                   \
2717    for (i = 0; i < nels; i++) {                                   \
2718        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);        \
2719        if (unlikely(stp##_is_signaling_nan(xb.sfld,               \
2720                                            &env->fp_status))) {   \
2721            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
2722            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                 \
2723        }                                                          \
2724        if (sfprf) {                                               \
2725            helper_compute_fprf_##ttp(env, xt.tfld);               \
2726        }                                                          \
2727    }                                                              \
2728                                                                   \
2729    putVSR(xT(opcode), &xt, env);                                  \
2730    float_check_status(env);                                       \
2731}
2732
2733VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
2734VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
2735VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2*i), 0)
2736VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2*i), VsrD(i), 0)
2737
2738/* VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
2739 *   op    - instruction mnemonic
2740 *   nels  - number of elements (1, 2 or 4)
2741 *   stp   - source type (float32 or float64)
2742 *   ttp   - target type (float64 or float128)
2743 *   sfld  - source vsr_t field
2744 *   tfld  - target vsr_t field (f64 or f128)
2745 *   sfprf - set FPRF
2746 */
2747#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf)    \
2748void helper_##op(CPUPPCState *env, uint32_t opcode)                       \
2749{                                                                       \
2750    ppc_vsr_t xt, xb;                                                   \
2751    int i;                                                              \
2752                                                                        \
2753    getVSR(rB(opcode) + 32, &xb, env);                                  \
2754    getVSR(rD(opcode) + 32, &xt, env);                                  \
2755                                                                        \
2756    for (i = 0; i < nels; i++) {                                        \
2757        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);             \
2758        if (unlikely(stp##_is_signaling_nan(xb.sfld,                    \
2759                                            &env->fp_status))) {        \
2760            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);      \
2761            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                      \
2762        }                                                               \
2763        if (sfprf) {                                                    \
2764            helper_compute_fprf_##ttp(env, xt.tfld);                    \
2765        }                                                               \
2766    }                                                                   \
2767                                                                        \
2768    putVSR(rD(opcode) + 32, &xt, env);                                  \
2769    float_check_status(env);                                            \
2770}
2771
2772VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
2773
2774/* VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
2775 *                       involving one half precision value
2776 *   op    - instruction mnemonic
2777 *   nels  - number of elements (1, 2 or 4)
2778 *   stp   - source type
2779 *   ttp   - target type
2780 *   sfld  - source vsr_t field
2781 *   tfld  - target vsr_t field
2782 *   sfprf - set FPRF
2783 */
2784#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf) \
2785void helper_##op(CPUPPCState *env, uint32_t opcode)                \
2786{                                                                  \
2787    ppc_vsr_t xt, xb;                                              \
2788    int i;                                                         \
2789                                                                   \
2790    getVSR(xB(opcode), &xb, env);                                  \
2791    memset(&xt, 0, sizeof(xt));                                    \
2792                                                                   \
2793    for (i = 0; i < nels; i++) {                                   \
2794        xt.tfld = stp##_to_##ttp(xb.sfld, 1, &env->fp_status);     \
2795        if (unlikely(stp##_is_signaling_nan(xb.sfld,               \
2796                                            &env->fp_status))) {   \
2797            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
2798            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                 \
2799        }                                                          \
2800        if (sfprf) {                                               \
2801            helper_compute_fprf_##ttp(env, xt.tfld);               \
2802        }                                                          \
2803    }                                                              \
2804                                                                   \
2805    putVSR(xT(opcode), &xt, env);                                  \
2806    float_check_status(env);                                       \
2807}
2808
2809VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
2810VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
2811VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0)
2812VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
2813
2814/*
2815 * xscvqpdp isn't using VSX_CVT_FP_TO_FP() because it also implements
2816 * xscvqpdpo, which rounds to odd when Rc(opcode) is set.
2817 */
2818void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode)
2819{
2820    ppc_vsr_t xt, xb;
2821    float_status tstat;
2822
2823    getVSR(rB(opcode) + 32, &xb, env);
2824    memset(&xt, 0, sizeof(xt));
2825
2826    tstat = env->fp_status;
2827    if (unlikely(Rc(opcode) != 0)) {
2828        tstat.float_rounding_mode = float_round_to_odd;
2829    }
2830
2831    xt.VsrD(0) = float128_to_float64(xb.f128, &tstat);
2832    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
2833    if (unlikely(float128_is_signaling_nan(xb.f128,
2834                                           &tstat))) {
2835        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
2836        xt.VsrD(0) = float64_snan_to_qnan(xt.VsrD(0));
2837    }
2838    helper_compute_fprf_float64(env, xt.VsrD(0));
2839
2840    putVSR(rD(opcode) + 32, &xt, env);
2841    float_check_status(env);
2842}
2843
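/* xscvdpspn and xscvspdpn are the non-signalling single/double conversions:
 * they operate on a scratch float_status, so neither FPSCR exception bits
 * nor FPRF are updated.  The single-precision value lives in word 0, i.e.
 * the upper 32 bits of the doubleword argument or result.
 */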
2844uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
2845{
2846    float_status tstat = env->fp_status;
2847    set_float_exception_flags(0, &tstat);
2848
2849    return (uint64_t)float64_to_float32(xb, &tstat) << 32;
2850}
2851
2852uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
2853{
2854    float_status tstat = env->fp_status;
2855    set_float_exception_flags(0, &tstat);
2856
2857    return float32_to_float64(xb >> 32, &tstat);
2858}
2859
2860/* VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
2861 *   op    - instruction mnemonic
2862 *   nels  - number of elements (1, 2 or 4)
2863 *   stp   - source type (float32 or float64)
2864 *   ttp   - target type (int32, uint32, int64 or uint64)
2865 *   sfld  - source vsr_t field
2866 *   tfld  - target vsr_t field
2867 *   rnan  - resulting NaN
2868 */
2869#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan)              \
2870void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
2871{                                                                            \
2872    ppc_vsr_t xt, xb;                                                        \
2873    int i;                                                                   \
2874                                                                             \
2875    getVSR(xB(opcode), &xb, env);                                            \
2876    getVSR(xT(opcode), &xt, env);                                            \
2877                                                                             \
2878    for (i = 0; i < nels; i++) {                                             \
2879        if (unlikely(stp##_is_any_nan(xb.sfld))) {                           \
2880            if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) {          \
2881                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);       \
2882            }                                                                \
2883            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);            \
2884            xt.tfld = rnan;                                                  \
2885        } else {                                                             \
2886            xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld,                \
2887                          &env->fp_status);                                  \
2888            if (env->fp_status.float_exception_flags & float_flag_invalid) { \
2889                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);        \
2890            }                                                                \
2891        }                                                                    \
2892    }                                                                        \
2893                                                                             \
2894    putVSR(xT(opcode), &xt, env);                                            \
2895    float_check_status(env);                                                 \
2896}
2897
2898VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
2899                  0x8000000000000000ULL)
2900VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
2901                  0x80000000U)
2902VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
2903VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
2904VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
2905                  0x8000000000000000ULL)
2906VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2*i), \
2907                  0x80000000U)
2908VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
2909VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2*i), 0U)
2910VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2*i), VsrD(i), \
2911                  0x8000000000000000ULL)
2912VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
2913VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2*i), VsrD(i), 0ULL)
2914VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
2915
2916/* VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
2917 *   op    - instruction mnemonic
2918 *   stp   - source type (float64 or float128)
2919 *   ttp   - target type (int32, uint32, int64 or uint64)
2920 *   sfld  - source vsr_t field
2921 *   tfld  - target vsr_t field
2922 *   rnan  - resulting NaN
2923 */
2924#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan)             \
2925void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
2926{                                                                            \
2927    ppc_vsr_t xt, xb;                                                        \
2928                                                                             \
2929    getVSR(rB(opcode) + 32, &xb, env);                                       \
2930    memset(&xt, 0, sizeof(xt));                                              \
2931                                                                             \
2932    if (unlikely(stp##_is_any_nan(xb.sfld))) {                               \
2933        if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) {              \
2934            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
2935        }                                                                    \
2936        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);                \
2937        xt.tfld = rnan;                                                      \
2938    } else {                                                                 \
2939        xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld,                    \
2940                      &env->fp_status);                                      \
2941        if (env->fp_status.float_exception_flags & float_flag_invalid) {     \
2942            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);            \
2943        }                                                                    \
2944    }                                                                        \
2945                                                                             \
2946    putVSR(rD(opcode) + 32, &xt, env);                                       \
2947    float_check_status(env);                                                 \
2948}
2949
2950VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0),          \
2951                  0x8000000000000000ULL)
2952
2953VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0),          \
2954                  0xffffffff80000000ULL)
2955VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
2956VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
2957
2958/* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
2959 *   op    - instruction mnemonic
2960 *   nels  - number of elements (1, 2 or 4)
2961 *   stp   - source type (int32, uint32, int64 or uint64)
2962 *   ttp   - target type (float32 or float64)
2963 *   sfld  - source vsr_t field
2964 *   tfld  - target vsr_t field
2965 *   sfprf - set FPRF
2966 *   r2sp  - round the result to single precision
2967 */
2968#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp)  \
2969void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
2970{                                                                       \
2971    ppc_vsr_t xt, xb;                                                   \
2972    int i;                                                              \
2973                                                                        \
2974    getVSR(xB(opcode), &xb, env);                                       \
2975    getVSR(xT(opcode), &xt, env);                                       \
2976                                                                        \
2977    for (i = 0; i < nels; i++) {                                        \
2978        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);             \
2979        if (r2sp) {                                                     \
2980            xt.tfld = helper_frsp(env, xt.tfld);                        \
2981        }                                                               \
2982        if (sfprf) {                                                    \
2983            helper_compute_fprf_float64(env, xt.tfld);                  \
2984        }                                                               \
2985    }                                                                   \
2986                                                                        \
2987    putVSR(xT(opcode), &xt, env);                                       \
2988    float_check_status(env);                                            \
2989}
2990
2991VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
2992VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
2993VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
2994VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
2995VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
2996VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
2997VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2*i), VsrD(i), 0, 0)
2998VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2*i), VsrD(i), 0, 0)
2999VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2*i), 0, 0)
3000VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2*i), 0, 0)
3001VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
3002VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
3003
3004/* VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
3005 *   op    - instruction mnemonic
3006 *   stp   - source type (int32, uint32, int64 or uint64)
3007 *   ttp   - target type (float64 or float128)
3008 *   sfld  - source vsr_t field
3009 *   tfld  - target vsr_t field
3010 */
3011#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld)              \
3012void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
3013{                                                                       \
3014    ppc_vsr_t xt, xb;                                                   \
3015                                                                        \
3016    getVSR(rB(opcode) + 32, &xb, env);                                  \
3017    getVSR(rD(opcode) + 32, &xt, env);                                  \
3018                                                                        \
3019    xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);                 \
3020    helper_compute_fprf_##ttp(env, xt.tfld);                            \
3021                                                                        \
3022    putVSR(rD(opcode) + 32, &xt, env);                                  \
3023    float_check_status(env);                                            \
3024}
3025
3026VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
3027VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
3028
3029/* For "use current rounding mode", define a value that will not be one of
3030 * the existing rounding mode enums.
3031 */
3032#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
3033  float_round_up + float_round_to_zero)
3034
3035/* VSX_ROUND - VSX floating point round
3036 *   op    - instruction mnemonic
3037 *   nels  - number of elements (1, 2 or 4)
3038 *   tp    - type (float32 or float64)
3039 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
3040 *   rmode - rounding mode
3041 *   sfprf - set FPRF
3042 */
3043#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf)                     \
3044void helper_##op(CPUPPCState *env, uint32_t opcode)                    \
3045{                                                                      \
3046    ppc_vsr_t xt, xb;                                                  \
3047    int i;                                                             \
3048    getVSR(xB(opcode), &xb, env);                                      \
3049    getVSR(xT(opcode), &xt, env);                                      \
3050                                                                       \
3051    if (rmode != FLOAT_ROUND_CURRENT) {                                \
3052        set_float_rounding_mode(rmode, &env->fp_status);               \
3053    }                                                                  \
3054                                                                       \
3055    for (i = 0; i < nels; i++) {                                       \
3056        if (unlikely(tp##_is_signaling_nan(xb.fld,                     \
3057                                           &env->fp_status))) {        \
3058            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);     \
3059            xt.fld = tp##_snan_to_qnan(xb.fld);                        \
3060        } else {                                                       \
3061            xt.fld = tp##_round_to_int(xb.fld, &env->fp_status);       \
3062        }                                                              \
3063        if (sfprf) {                                                   \
3064            helper_compute_fprf_float64(env, xt.fld);                  \
3065        }                                                              \
3066    }                                                                  \
3067                                                                       \
3068    /* If this is not a "use current rounding mode" instruction,       \
3069     * then inhibit setting of the XX bit and restore rounding         \
3070     * mode from FPSCR */                                              \
3071    if (rmode != FLOAT_ROUND_CURRENT) {                                \
3072        fpscr_set_rounding_mode(env);                                  \
3073        env->fp_status.float_exception_flags &= ~float_flag_inexact;   \
3074    }                                                                  \
3075                                                                       \
3076    putVSR(xT(opcode), &xt, env);                                      \
3077    float_check_status(env);                                           \
3078}
3079
3080VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
3081VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
3082VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
3083VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
3084VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)
3085
3086VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
3087VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
3088VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
3089VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
3090VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)
3091
3092VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
3093VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
3094VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
3095VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
3096VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
3097
3098uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
3099{
3100    helper_reset_fpstatus(env);
3101
3102    uint64_t xt = helper_frsp(env, xb);
3103
3104    helper_compute_fprf_float64(env, xt);
3105    float_check_status(env);
3106    return xt;
3107}
3108
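/* VSX_XXPERM - VSX vector byte permute (xxperm, xxpermr)
 *   op      - instruction mnemonic
 *   indexed - non-zero to complement the index (xxpermr)
 *
 * Each byte of the permute control vector in xB supplies a 5-bit index
 * into the 32-byte concatenation of xA (0-15) and the original xT (16-31);
 * xxpermr uses 31 - index instead.
 */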
3109#define VSX_XXPERM(op, indexed)                                       \
3110void helper_##op(CPUPPCState *env, uint32_t opcode)                   \
3111{                                                                     \
3112    ppc_vsr_t xt, xa, pcv, xto;                                       \
3113    int i, idx;                                                       \
3114                                                                      \
3115    getVSR(xA(opcode), &xa, env);                                     \
3116    getVSR(xT(opcode), &xt, env);                                     \
3117    getVSR(xB(opcode), &pcv, env);                                    \
3118                                                                      \
3119    for (i = 0; i < 16; i++) {                                        \
3120        idx = pcv.VsrB(i) & 0x1F;                                     \
3121        if (indexed) {                                                \
3122            idx = 31 - idx;                                           \
3123        }                                                             \
3124        xto.VsrB(i) = (idx <= 15) ? xa.VsrB(idx) : xt.VsrB(idx - 16); \
3125    }                                                                 \
3126    putVSR(xT(opcode), &xto, env);                                    \
3127}
3128
3129VSX_XXPERM(xxperm, 0)
3130VSX_XXPERM(xxpermr, 1)
3131
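/* xvxsigsp - extract the significand of each single-precision element,
 * restoring the implicit leading 1 for normalized values (exponent neither
 * 0 nor 255); sign and exponent bits are cleared in the result.
 */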
3132void helper_xvxsigsp(CPUPPCState *env, uint32_t opcode)
3133{
3134    ppc_vsr_t xt, xb;
3135    uint32_t exp, i, fraction;
3136
3137    getVSR(xB(opcode), &xb, env);
3138    memset(&xt, 0, sizeof(xt));
3139
3140    for (i = 0; i < 4; i++) {
3141        exp = (xb.VsrW(i) >> 23) & 0xFF;
3142        fraction = xb.VsrW(i) & 0x7FFFFF;
3143        if (exp != 0 && exp != 255) {
3144            xt.VsrW(i) = fraction | 0x00800000;
3145        } else {
3146            xt.VsrW(i) = fraction;
3147        }
3148    }
3149    putVSR(xT(opcode), &xt, env);
3150}
3151
3152/* VSX_TEST_DC - VSX floating point test data class
3153 *   op    - instruction mnemonic
3154 *   nels  - number of elements (1, 2 or 4)
3155 *   xbn   - VSR register number
3156 *   tp    - type (float32 or float64)
3157 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
3158 *   tfld   - target vsr_t field (VsrD(*) or VsrW(*))
3159 *   fld_max - target field max
3160 *   scrf - set result in CR and FPCC
3161 */
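/* Data-class mask (DCMX) bits tested below:
 *   bit 6 - NaN
 *   bit 5 - +infinity    bit 4 - -infinity
 *   bit 3 - +zero        bit 2 - -zero
 *   bit 1 - +denormal    bit 0 - -denormal
 */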
3162#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf)  \
3163void helper_##op(CPUPPCState *env, uint32_t opcode)         \
3164{                                                           \
3165    ppc_vsr_t xt, xb;                                       \
3166    uint32_t i, sign, dcmx;                                 \
3167    uint32_t cc, match = 0;                                 \
3168                                                            \
3169    getVSR(xbn, &xb, env);                                  \
3170    if (!scrf) {                                            \
3171        memset(&xt, 0, sizeof(xt));                         \
3172        dcmx = DCMX_XV(opcode);                             \
3173    } else {                                                \
3174        dcmx = DCMX(opcode);                                \
3175    }                                                       \
3176                                                            \
3177    for (i = 0; i < nels; i++) {                            \
3178        sign = tp##_is_neg(xb.fld);                         \
3179        if (tp##_is_any_nan(xb.fld)) {                      \
3180            match = extract32(dcmx, 6, 1);                  \
3181        } else if (tp##_is_infinity(xb.fld)) {              \
3182            match = extract32(dcmx, 4 + !sign, 1);          \
3183        } else if (tp##_is_zero(xb.fld)) {                  \
3184            match = extract32(dcmx, 2 + !sign, 1);          \
3185        } else if (tp##_is_zero_or_denormal(xb.fld)) {      \
3186            match = extract32(dcmx, 0 + !sign, 1);          \
3187        }                                                   \
3188                                                            \
3189        if (scrf) {                                         \
3190            cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT;  \
3191            env->fpscr &= ~(0x0F << FPSCR_FPRF);            \
3192            env->fpscr |= cc << FPSCR_FPRF;                 \
3193            env->crf[BF(opcode)] = cc;                      \
3194        } else {                                            \
3195            xt.tfld = match ? fld_max : 0;                  \
3196        }                                                   \
3197        match = 0;                                          \
3198    }                                                       \
3199    if (!scrf) {                                            \
3200        putVSR(xT(opcode), &xt, env);                       \
3201    }                                                       \
3202}
3203
3204VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0)
3205VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
3206VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
3207VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)
3208
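/* xststdcsp - scalar test data class, single-precision variant.  Besides
 * the usual data classes it treats values with a biased exponent below
 * 0x381 (too small to be a normal single-precision number) as denormal,
 * and reports in the CR SO bit operands that do not survive a round trip
 * through single precision.
 */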
3209void helper_xststdcsp(CPUPPCState *env, uint32_t opcode)
3210{
3211    ppc_vsr_t xb;
3212    uint32_t dcmx, sign, exp;
3213    uint32_t cc, match = 0, not_sp = 0;
3214
3215    getVSR(xB(opcode), &xb, env);
3216    dcmx = DCMX(opcode);
3217    exp = (xb.VsrD(0) >> 52) & 0x7FF;
3218
3219    sign = float64_is_neg(xb.VsrD(0));
3220    if (float64_is_any_nan(xb.VsrD(0))) {
3221        match = extract32(dcmx, 6, 1);
3222    } else if (float64_is_infinity(xb.VsrD(0))) {
3223        match = extract32(dcmx, 4 + !sign, 1);
3224    } else if (float64_is_zero(xb.VsrD(0))) {
3225        match = extract32(dcmx, 2 + !sign, 1);
3226    } else if (float64_is_zero_or_denormal(xb.VsrD(0)) ||
3227               (exp > 0 && exp < 0x381)) {
3228        match = extract32(dcmx, 0 + !sign, 1);
3229    }
3230
3231    not_sp = !float64_eq(xb.VsrD(0),
3232                         float32_to_float64(
3233                             float64_to_float32(xb.VsrD(0), &env->fp_status),
3234                             &env->fp_status), &env->fp_status);
3235
3236    cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
3237    env->fpscr &= ~(0x0F << FPSCR_FPRF);
3238    env->fpscr |= cc << FPSCR_FPRF;
3239    env->crf[BF(opcode)] = cc;
3240}
3241
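/* xsrqpi (and its inexact-reporting form, EX = 1) - round quad-precision
 * to integral value.  The rounding mode is selected by the R and RMC
 * instruction fields:
 *   R=0 RMC=00 - round to nearest, ties away from zero
 *   R=0 RMC=11 - use the rounding mode from FPSCR[RN]
 *   R=1 RMC=00/01/10/11 - nearest-even/toward zero/toward +inf/toward -inf
 * With EX = 0 the inexact flag is suppressed.
 */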
3242void helper_xsrqpi(CPUPPCState *env, uint32_t opcode)
3243{
3244    ppc_vsr_t xb;
3245    ppc_vsr_t xt;
3246    uint8_t r = Rrm(opcode);
3247    uint8_t ex = Rc(opcode);
3248    uint8_t rmc = RMC(opcode);
3249    uint8_t rmode = 0;
3250    float_status tstat;
3251
3252    getVSR(rB(opcode) + 32, &xb, env);
3253    memset(&xt, 0, sizeof(xt));
3254    helper_reset_fpstatus(env);
3255
3256    if (r == 0 && rmc == 0) {
3257        rmode = float_round_ties_away;
3258    } else if (r == 0 && rmc == 0x3) {
3259        rmode = fpscr_rn;
3260    } else if (r == 1) {
3261        switch (rmc) {
3262        case 0:
3263            rmode = float_round_nearest_even;
3264            break;
3265        case 1:
3266            rmode = float_round_to_zero;
3267            break;
3268        case 2:
3269            rmode = float_round_up;
3270            break;
3271        case 3:
3272            rmode = float_round_down;
3273            break;
3274        default:
3275            abort();
3276        }
3277    }
3278
3279    tstat = env->fp_status;
3280    set_float_exception_flags(0, &tstat);
3281    set_float_rounding_mode(rmode, &tstat);
3282    xt.f128 = float128_round_to_int(xb.f128, &tstat);
3283    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3284
3285    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3286        if (float128_is_signaling_nan(xb.f128, &tstat)) {
3287            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
3288            xt.f128 = float128_snan_to_qnan(xt.f128);
3289        }
3290    }
3291
3292    if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
3293        env->fp_status.float_exception_flags &= ~float_flag_inexact;
3294    }
3295
3296    helper_compute_fprf_float128(env, xt.f128);
3297    float_check_status(env);
3298    putVSR(rD(opcode) + 32, &xt, env);
3299}
3300
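/* xsrqpxp - round quad-precision to double-extended precision by
 * converting to floatx80 (64-bit significand) and back, using the same
 * R/RMC rounding-mode selection as xsrqpi above.
 */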
3301void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode)
3302{
3303    ppc_vsr_t xb;
3304    ppc_vsr_t xt;
3305    uint8_t r = Rrm(opcode);
3306    uint8_t rmc = RMC(opcode);
3307    uint8_t rmode = 0;
3308    floatx80 round_res;
3309    float_status tstat;
3310
3311    getVSR(rB(opcode) + 32, &xb, env);
3312    memset(&xt, 0, sizeof(xt));
3313    helper_reset_fpstatus(env);
3314
3315    if (r == 0 && rmc == 0) {
3316        rmode = float_round_ties_away;
3317    } else if (r == 0 && rmc == 0x3) {
3318        rmode = fpscr_rn;
3319    } else if (r == 1) {
3320        switch (rmc) {
3321        case 0:
3322            rmode = float_round_nearest_even;
3323            break;
3324        case 1:
3325            rmode = float_round_to_zero;
3326            break;
3327        case 2:
3328            rmode = float_round_up;
3329            break;
3330        case 3:
3331            rmode = float_round_down;
3332            break;
3333        default:
3334            abort();
3335        }
3336    }
3337
3338    tstat = env->fp_status;
3339    set_float_exception_flags(0, &tstat);
3340    set_float_rounding_mode(rmode, &tstat);
3341    round_res = float128_to_floatx80(xb.f128, &tstat);
3342    xt.f128 = floatx80_to_float128(round_res, &tstat);
3343    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3344
3345    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3346        if (float128_is_signaling_nan(xb.f128, &tstat)) {
3347            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
3348            xt.f128 = float128_snan_to_qnan(xt.f128);
3349        }
3350    }
3351
3352    helper_compute_fprf_float128(env, xt.f128);
3353    putVSR(rD(opcode) + 32, &xt, env);
3354    float_check_status(env);
3355}
3356
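/* xssqrtqp - quad-precision square root.  As with xscvqpdp above, a set
 * Rc bit selects the round-to-odd form of the instruction.
 */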
3357void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode)
3358{
3359    ppc_vsr_t xb;
3360    ppc_vsr_t xt;
3361    float_status tstat;
3362
3363    getVSR(rB(opcode) + 32, &xb, env);
3364    memset(&xt, 0, sizeof(xt));
3365    helper_reset_fpstatus(env);
3366
3367    tstat = env->fp_status;
3368    if (unlikely(Rc(opcode) != 0)) {
3369        tstat.float_rounding_mode = float_round_to_odd;
3370    }
3371
3372    set_float_exception_flags(0, &tstat);
3373    xt.f128 = float128_sqrt(xb.f128, &tstat);
3374    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3375
3376    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3377        if (float128_is_signaling_nan(xb.f128, &tstat)) {
3378            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
3379            xt.f128 = float128_snan_to_qnan(xb.f128);
3380        } else if (float128_is_quiet_nan(xb.f128, &tstat)) {
3381            xt.f128 = xb.f128;
3382        } else if (float128_is_neg(xb.f128) && !float128_is_zero(xb.f128)) {
3383            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
3384            set_snan_bit_is_one(0, &env->fp_status);
3385            xt.f128 = float128_default_nan(&env->fp_status);
3386        }
3387    }
3388
3389    helper_compute_fprf_float128(env, xt.f128);
3390    putVSR(rD(opcode) + 32, &xt, env);
3391    float_check_status(env);
3392}
3393
3394void helper_xssubqp(CPUPPCState *env, uint32_t opcode)
3395{
3396    ppc_vsr_t xt, xa, xb;
3397    float_status tstat;
3398
3399    getVSR(rA(opcode) + 32, &xa, env);
3400    getVSR(rB(opcode) + 32, &xb, env);
3401    getVSR(rD(opcode) + 32, &xt, env);
3402    helper_reset_fpstatus(env);
3403
3404    tstat = env->fp_status;
3405    if (unlikely(Rc(opcode) != 0)) {
3406        tstat.float_rounding_mode = float_round_to_odd;
3407    }
3408
3409    set_float_exception_flags(0, &tstat);
3410    xt.f128 = float128_sub(xa.f128, xb.f128, &tstat);
3411    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3412
3413    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3414        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
3415            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
3416        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
3417                   float128_is_signaling_nan(xb.f128, &tstat)) {
3418            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
3419        }
3420    }
3421
3422    helper_compute_fprf_float128(env, xt.f128);
3423    putVSR(rD(opcode) + 32, &xt, env);
3424    float_check_status(env);
3425}
3426