qemu/target/ppc/fpu_helper.c
   1/*
   2 *  PowerPC floating point and SPE emulation helpers for QEMU.
   3 *
   4 *  Copyright (c) 2003-2007 Jocelyn Mayer
   5 *
   6 * This library is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU Lesser General Public
   8 * License as published by the Free Software Foundation; either
   9 * version 2 of the License, or (at your option) any later version.
  10 *
  11 * This library is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14 * Lesser General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU Lesser General Public
  17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  18 */
  19#include "qemu/osdep.h"
  20#include "cpu.h"
  21#include "exec/helper-proto.h"
  22#include "exec/exec-all.h"
  23#include "internal.h"
  24#include "fpu/softfloat.h"
  25
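/*
 * Quiet a signaling NaN by setting the most-significant fraction bit
 * (the "quiet" bit): bit 47 of the upper word for float128, bit 51 for
 * float64, bit 22 for float32 and bit 9 for float16.  Sign and payload
 * are preserved.
 */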
  26static inline float128 float128_snan_to_qnan(float128 x)
  27{
  28    float128 r;
  29
  30    r.high = x.high | 0x0000800000000000;
  31    r.low = x.low;
  32    return r;
  33}
  34
  35#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
  36#define float32_snan_to_qnan(x) ((x) | 0x00400000)
  37#define float16_snan_to_qnan(x) ((x) | 0x0200)
  38
  39/*****************************************************************************/
  40/* Floating point operations helpers */
  41uint64_t helper_float32_to_float64(CPUPPCState *env, uint32_t arg)
  42{
  43    CPU_FloatU f;
  44    CPU_DoubleU d;
  45
  46    f.l = arg;
  47    d.d = float32_to_float64(f.f, &env->fp_status);
  48    return d.ll;
  49}
  50
  51uint32_t helper_float64_to_float32(CPUPPCState *env, uint64_t arg)
  52{
  53    CPU_FloatU f;
  54    CPU_DoubleU d;
  55
  56    d.ll = arg;
  57    f.f = float64_to_float32(d.d, &env->fp_status);
  58    return f.l;
  59}
  60
  61static inline int ppc_float32_get_unbiased_exp(float32 f)
  62{
  63    return ((f >> 23) & 0xFF) - 127;
  64}
  65
  66static inline int ppc_float64_get_unbiased_exp(float64 f)
  67{
  68    return ((f >> 52) & 0x7FF) - 1023;
  69}
  70
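/*
 * COMPUTE_FPRF sets the 5-bit FPSCR[FPRF] result-class field.  The values
 * produced below correspond to the PowerPC class encodings:
 *   0x11  quiet NaN       0x09  -infinity     0x05  +infinity
 *   0x12  -zero           0x02  +zero
 *   0x18  -denormal       0x14  +denormal
 *   0x08  -normal         0x04  +normal
 * (denormal/normal codes are built from 0x10/0x00 plus a sign nibble).
 */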
  71#define COMPUTE_FPRF(tp)                                       \
  72void helper_compute_fprf_##tp(CPUPPCState *env, tp arg)        \
  73{                                                              \
  74    int isneg;                                                 \
  75    int fprf;                                                  \
  76                                                               \
  77    isneg = tp##_is_neg(arg);                                  \
  78    if (unlikely(tp##_is_any_nan(arg))) {                      \
  79        if (tp##_is_signaling_nan(arg, &env->fp_status)) {     \
  80            /* Signaling NaN: flags are undefined */           \
  81            fprf = 0x00;                                       \
  82        } else {                                               \
  83            /* Quiet NaN */                                    \
  84            fprf = 0x11;                                       \
  85        }                                                      \
  86    } else if (unlikely(tp##_is_infinity(arg))) {              \
  87        /* +/- infinity */                                     \
  88        if (isneg) {                                           \
  89            fprf = 0x09;                                       \
  90        } else {                                               \
  91            fprf = 0x05;                                       \
  92        }                                                      \
  93    } else {                                                   \
  94        if (tp##_is_zero(arg)) {                               \
  95            /* +/- zero */                                     \
  96            if (isneg) {                                       \
  97                fprf = 0x12;                                   \
  98            } else {                                           \
  99                fprf = 0x02;                                   \
 100            }                                                  \
 101        } else {                                               \
 102            if (tp##_is_zero_or_denormal(arg)) {               \
 103                /* Denormalized numbers */                     \
 104                fprf = 0x10;                                   \
 105            } else {                                           \
 106                /* Normalized numbers */                       \
 107                fprf = 0x00;                                   \
 108            }                                                  \
 109            if (isneg) {                                       \
 110                fprf |= 0x08;                                  \
 111            } else {                                           \
 112                fprf |= 0x04;                                  \
 113            }                                                  \
 114        }                                                      \
 115    }                                                          \
 116    /* We update FPSCR_FPRF */                                 \
 117    env->fpscr &= ~(0x1F << FPSCR_FPRF);                       \
 118    env->fpscr |= fprf << FPSCR_FPRF;                          \
 119}
 120
 121COMPUTE_FPRF(float16)
 122COMPUTE_FPRF(float32)
 123COMPUTE_FPRF(float64)
 124COMPUTE_FPRF(float128)
 125
 126/* Floating-point invalid operations exception */
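/*
 * Record one FPSCR[VX*] cause bit plus the VX and FX summaries.  For the
 * arithmetic causes (ISI, IDI, ZDZ, IMZ, SQRT, CVI) with VE clear, the
 * default quiet NaN (0x7FF8000000000000) is returned so the caller can
 * store it in the target FPR.  With VE set and MSR[FE0|FE1] non-zero the
 * program interrupt is raised here.
 */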
 127static inline __attribute__((__always_inline__))
 128uint64_t float_invalid_op_excp(CPUPPCState *env, int op, int set_fpcc)
 129{
 130    CPUState *cs = CPU(ppc_env_get_cpu(env));
 131    uint64_t ret = 0;
 132    int ve;
 133
 134    ve = fpscr_ve;
 135    switch (op) {
 136    case POWERPC_EXCP_FP_VXSNAN:
 137        env->fpscr |= 1 << FPSCR_VXSNAN;
 138        break;
 139    case POWERPC_EXCP_FP_VXSOFT:
 140        env->fpscr |= 1 << FPSCR_VXSOFT;
 141        break;
 142    case POWERPC_EXCP_FP_VXISI:
 143        /* Magnitude subtraction of infinities */
 144        env->fpscr |= 1 << FPSCR_VXISI;
 145        goto update_arith;
 146    case POWERPC_EXCP_FP_VXIDI:
 147        /* Division of infinity by infinity */
 148        env->fpscr |= 1 << FPSCR_VXIDI;
 149        goto update_arith;
 150    case POWERPC_EXCP_FP_VXZDZ:
 151        /* Division of zero by zero */
 152        env->fpscr |= 1 << FPSCR_VXZDZ;
 153        goto update_arith;
 154    case POWERPC_EXCP_FP_VXIMZ:
 155        /* Multiplication of zero by infinity */
 156        env->fpscr |= 1 << FPSCR_VXIMZ;
 157        goto update_arith;
 158    case POWERPC_EXCP_FP_VXVC:
 159        /* Ordered comparison of NaN */
 160        env->fpscr |= 1 << FPSCR_VXVC;
 161        if (set_fpcc) {
 162            env->fpscr &= ~(0xF << FPSCR_FPCC);
 163            env->fpscr |= 0x11 << FPSCR_FPCC;
 164        }
 165        /* We must update the target FPR before raising the exception */
 166        if (ve != 0) {
 167            cs->exception_index = POWERPC_EXCP_PROGRAM;
 168            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
 169            /* Update the floating-point enabled exception summary */
 170            env->fpscr |= 1 << FPSCR_FEX;
 171            /* Exception is deferred */
 172            ve = 0;
 173        }
 174        break;
 175    case POWERPC_EXCP_FP_VXSQRT:
 176        /* Square root of a negative number */
 177        env->fpscr |= 1 << FPSCR_VXSQRT;
 178    update_arith:
 179        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
 180        if (ve == 0) {
 181            /* Set the result to quiet NaN */
 182            ret = 0x7FF8000000000000ULL;
 183            if (set_fpcc) {
 184                env->fpscr &= ~(0xF << FPSCR_FPCC);
 185                env->fpscr |= 0x11 << FPSCR_FPCC;
 186            }
 187        }
 188        break;
 189    case POWERPC_EXCP_FP_VXCVI:
 190        /* Invalid conversion */
 191        env->fpscr |= 1 << FPSCR_VXCVI;
 192        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
 193        if (ve == 0) {
 194            /* Set the result to quiet NaN */
 195            ret = 0x7FF8000000000000ULL;
 196            if (set_fpcc) {
 197                env->fpscr &= ~(0xF << FPSCR_FPCC);
 198                env->fpscr |= 0x11 << FPSCR_FPCC;
 199            }
 200        }
 201        break;
 202    }
 203    /* Update the floating-point invalid operation summary */
 204    env->fpscr |= 1 << FPSCR_VX;
 205    /* Update the floating-point exception summary */
 206    env->fpscr |= FP_FX;
 207    if (ve != 0) {
 208        /* Update the floating-point enabled exception summary */
 209        env->fpscr |= 1 << FPSCR_FEX;
 210        if (msr_fe0 != 0 || msr_fe1 != 0) {
 211            /* GETPC() works here because this is inline */
 212            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
 213                                   POWERPC_EXCP_FP | op, GETPC());
 214        }
 215    }
 216    return ret;
 217}
 218
 219static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
 220{
 221    env->fpscr |= 1 << FPSCR_ZX;
 222    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
 223    /* Update the floating-point exception summary */
 224    env->fpscr |= FP_FX;
 225    if (fpscr_ze != 0) {
 226        /* Update the floating-point enabled exception summary */
 227        env->fpscr |= 1 << FPSCR_FEX;
 228        if (msr_fe0 != 0 || msr_fe1 != 0) {
 229            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
 230                                   POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
 231                                   raddr);
 232        }
 233    }
 234}
 235
 236static inline void float_overflow_excp(CPUPPCState *env)
 237{
 238    CPUState *cs = CPU(ppc_env_get_cpu(env));
 239
 240    env->fpscr |= 1 << FPSCR_OX;
 241    /* Update the floating-point exception summary */
 242    env->fpscr |= FP_FX;
 243    if (fpscr_oe != 0) {
 244        /* XXX: should adjust the result */
 245        /* Update the floating-point enabled exception summary */
 246        env->fpscr |= 1 << FPSCR_FEX;
 247        /* We must update the target FPR before raising the exception */
 248        cs->exception_index = POWERPC_EXCP_PROGRAM;
 249        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
 250    } else {
 251        env->fpscr |= 1 << FPSCR_XX;
 252        env->fpscr |= 1 << FPSCR_FI;
 253    }
 254}
 255
 256static inline void float_underflow_excp(CPUPPCState *env)
 257{
 258    CPUState *cs = CPU(ppc_env_get_cpu(env));
 259
 260    env->fpscr |= 1 << FPSCR_UX;
 261    /* Update the floating-point exception summary */
 262    env->fpscr |= FP_FX;
 263    if (fpscr_ue != 0) {
 264        /* XXX: should adjust the result */
 265        /* Update the floating-point enabled exception summary */
 266        env->fpscr |= 1 << FPSCR_FEX;
 267        /* We must update the target FPR before raising the exception */
 268        cs->exception_index = POWERPC_EXCP_PROGRAM;
 269        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
 270    }
 271}
 272
 273static inline void float_inexact_excp(CPUPPCState *env)
 274{
 275    CPUState *cs = CPU(ppc_env_get_cpu(env));
 276
 277    env->fpscr |= 1 << FPSCR_FI;
 278    env->fpscr |= 1 << FPSCR_XX;
 279    /* Update the floating-point exception summary */
 280    env->fpscr |= FP_FX;
 281    if (fpscr_xe != 0) {
 282        /* Update the floating-point enabled exception summary */
 283        env->fpscr |= 1 << FPSCR_FEX;
 284        /* We must update the target FPR before raising the exception */
 285        cs->exception_index = POWERPC_EXCP_PROGRAM;
 286        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
 287    }
 288}
 289
 290static inline void fpscr_set_rounding_mode(CPUPPCState *env)
 291{
 292    int rnd_type;
 293
 294    /* Set rounding mode */
 295    switch (fpscr_rn) {
 296    case 0:
 297        /* Best approximation (round to nearest) */
 298        rnd_type = float_round_nearest_even;
 299        break;
 300    case 1:
 301        /* Smaller magnitude (round toward zero) */
 302        rnd_type = float_round_to_zero;
 303        break;
 304    case 2:
 305        /* Round toward +infinity */
 306        rnd_type = float_round_up;
 307        break;
 308    default:
 309    case 3:
 310        /* Round toward -infinity */
 311        rnd_type = float_round_down;
 312        break;
 313    }
 314    set_float_rounding_mode(rnd_type, &env->fp_status);
 315}
 316
 317void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
 318{
 319    int prev;
 320
 321    prev = (env->fpscr >> bit) & 1;
 322    env->fpscr &= ~(1 << bit);
 323    if (prev == 1) {
 324        switch (bit) {
 325        case FPSCR_RN1:
 326        case FPSCR_RN:
 327            fpscr_set_rounding_mode(env);
 328            break;
 329        case FPSCR_VXSNAN:
 330        case FPSCR_VXISI:
 331        case FPSCR_VXIDI:
 332        case FPSCR_VXZDZ:
 333        case FPSCR_VXIMZ:
 334        case FPSCR_VXVC:
 335        case FPSCR_VXSOFT:
 336        case FPSCR_VXSQRT:
 337        case FPSCR_VXCVI:
 338            if (!fpscr_ix) {
 339                /* Set VX bit to zero */
 340                env->fpscr &= ~(1 << FPSCR_VX);
 341            }
 342            break;
 343        case FPSCR_OX:
 344        case FPSCR_UX:
 345        case FPSCR_ZX:
 346        case FPSCR_XX:
 347        case FPSCR_VE:
 348        case FPSCR_OE:
 349        case FPSCR_UE:
 350        case FPSCR_ZE:
 351        case FPSCR_XE:
 352            if (!fpscr_eex) {
 353                /* Clear the FEX bit */
 354                env->fpscr &= ~(1 << FPSCR_FEX);
 355            }
 356            break;
 357        default:
 358            break;
 359        }
 360    }
 361}
 362
 363void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
 364{
 365    CPUState *cs = CPU(ppc_env_get_cpu(env));
 366    int prev;
 367
 368    prev = (env->fpscr >> bit) & 1;
 369    env->fpscr |= 1 << bit;
 370    if (prev == 0) {
 371        switch (bit) {
 372        case FPSCR_VX:
 373            env->fpscr |= FP_FX;
 374            if (fpscr_ve) {
 375                goto raise_ve;
 376            }
 377            break;
 378        case FPSCR_OX:
 379            env->fpscr |= FP_FX;
 380            if (fpscr_oe) {
 381                goto raise_oe;
 382            }
 383            break;
 384        case FPSCR_UX:
 385            env->fpscr |= FP_FX;
 386            if (fpscr_ue) {
 387                goto raise_ue;
 388            }
 389            break;
 390        case FPSCR_ZX:
 391            env->fpscr |= FP_FX;
 392            if (fpscr_ze) {
 393                goto raise_ze;
 394            }
 395            break;
 396        case FPSCR_XX:
 397            env->fpscr |= FP_FX;
 398            if (fpscr_xe) {
 399                goto raise_xe;
 400            }
 401            break;
 402        case FPSCR_VXSNAN:
 403        case FPSCR_VXISI:
 404        case FPSCR_VXIDI:
 405        case FPSCR_VXZDZ:
 406        case FPSCR_VXIMZ:
 407        case FPSCR_VXVC:
 408        case FPSCR_VXSOFT:
 409        case FPSCR_VXSQRT:
 410        case FPSCR_VXCVI:
 411            env->fpscr |= 1 << FPSCR_VX;
 412            env->fpscr |= FP_FX;
 413            if (fpscr_ve != 0) {
 414                goto raise_ve;
 415            }
 416            break;
 417        case FPSCR_VE:
 418            if (fpscr_vx != 0) {
 419            raise_ve:
 420                env->error_code = POWERPC_EXCP_FP;
 421                if (fpscr_vxsnan) {
 422                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
 423                }
 424                if (fpscr_vxisi) {
 425                    env->error_code |= POWERPC_EXCP_FP_VXISI;
 426                }
 427                if (fpscr_vxidi) {
 428                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
 429                }
 430                if (fpscr_vxzdz) {
 431                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
 432                }
 433                if (fpscr_vximz) {
 434                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
 435                }
 436                if (fpscr_vxvc) {
 437                    env->error_code |= POWERPC_EXCP_FP_VXVC;
 438                }
 439                if (fpscr_vxsoft) {
 440                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
 441                }
 442                if (fpscr_vxsqrt) {
 443                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
 444                }
 445                if (fpscr_vxcvi) {
 446                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
 447                }
 448                goto raise_excp;
 449            }
 450            break;
 451        case FPSCR_OE:
 452            if (fpscr_ox != 0) {
 453            raise_oe:
 454                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
 455                goto raise_excp;
 456            }
 457            break;
 458        case FPSCR_UE:
 459            if (fpscr_ux != 0) {
 460            raise_ue:
 461                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
 462                goto raise_excp;
 463            }
 464            break;
 465        case FPSCR_ZE:
 466            if (fpscr_zx != 0) {
 467            raise_ze:
 468                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
 469                goto raise_excp;
 470            }
 471            break;
 472        case FPSCR_XE:
 473            if (fpscr_xx != 0) {
 474            raise_xe:
 475                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
 476                goto raise_excp;
 477            }
 478            break;
 479        case FPSCR_RN1:
 480        case FPSCR_RN:
 481            fpscr_set_rounding_mode(env);
 482            break;
 483        default:
 484            break;
 485        raise_excp:
 486            /* Update the floating-point enabled exception summary */
 487            env->fpscr |= 1 << FPSCR_FEX;
 488            /* We have to update Rc1 before raising the exception */
 489            cs->exception_index = POWERPC_EXCP_PROGRAM;
 490            break;
 491        }
 492    }
 493}
 494
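/*
 * mtfsf-style masked update: bit i of 'mask' selects the i-th 4-bit nibble
 * of the FPSCR.  The VX and FEX summary bits are then recomputed, a pending
 * program interrupt is flagged if an enabled exception is now set, and the
 * softfloat rounding mode is resynchronized with FPSCR[RN].
 */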
 495void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
 496{
 497    CPUState *cs = CPU(ppc_env_get_cpu(env));
 498    target_ulong prev, new;
 499    int i;
 500
 501    prev = env->fpscr;
 502    new = (target_ulong)arg;
 503    new &= ~0x60000000LL;
 504    new |= prev & 0x60000000LL;
 505    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
 506        if (mask & (1 << i)) {
 507            env->fpscr &= ~(0xFLL << (4 * i));
 508            env->fpscr |= new & (0xFLL << (4 * i));
 509        }
 510    }
 511    /* Update VX and FEX */
 512    if (fpscr_ix != 0) {
 513        env->fpscr |= 1 << FPSCR_VX;
 514    } else {
 515        env->fpscr &= ~(1 << FPSCR_VX);
 516    }
 517    if ((fpscr_ex & fpscr_eex) != 0) {
 518        env->fpscr |= 1 << FPSCR_FEX;
 519        cs->exception_index = POWERPC_EXCP_PROGRAM;
 520        /* XXX: we should compute it properly */
 521        env->error_code = POWERPC_EXCP_FP;
 522    } else {
 523        env->fpscr &= ~(1 << FPSCR_FEX);
 524    }
 525    fpscr_set_rounding_mode(env);
 526}
 527
 528void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
 529{
 530    helper_store_fpscr(env, arg, mask);
 531}
 532
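/*
 * Fold the exception flags accumulated in env->fp_status into the FPSCR
 * status bits (ZX/OX/UX/XX), clear FPSCR[FI] if no inexact result was
 * produced, and deliver any deferred program interrupt now that the target
 * register has been updated.
 */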
 533static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
 534{
 535    CPUState *cs = CPU(ppc_env_get_cpu(env));
 536    int status = get_float_exception_flags(&env->fp_status);
 537    bool inexact_happened = false;
 538
 539    if (status & float_flag_divbyzero) {
 540        float_zero_divide_excp(env, raddr);
 541    } else if (status & float_flag_overflow) {
 542        float_overflow_excp(env);
 543    } else if (status & float_flag_underflow) {
 544        float_underflow_excp(env);
 545    } else if (status & float_flag_inexact) {
 546        float_inexact_excp(env);
 547        inexact_happened = true;
 548    }
 549
 550    /* if the inexact flag was not set */
 551    if (inexact_happened == false) {
 552        env->fpscr &= ~(1 << FPSCR_FI); /* clear the FPSCR[FI] bit */
 553    }
 554
 555    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
 556        (env->error_code & POWERPC_EXCP_FP)) {
 557        /* Deferred floating-point exception after target FPR update */
 558        if (msr_fe0 != 0 || msr_fe1 != 0) {
 559            raise_exception_err_ra(env, cs->exception_index,
 560                                   env->error_code, raddr);
 561        }
 562    }
 563}
 564
 565static inline __attribute__((__always_inline__))
 566void float_check_status(CPUPPCState *env)
 567{
 568    /* GETPC() works here because this is inline */
 569    do_float_check_status(env, GETPC());
 570}
 571
 572void helper_float_check_status(CPUPPCState *env)
 573{
 574    do_float_check_status(env, GETPC());
 575}
 576
 577void helper_reset_fpstatus(CPUPPCState *env)
 578{
 579    set_float_exception_flags(0, &env->fp_status);
 580}
 581
 582/* fadd - fadd. */
 583uint64_t helper_fadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
 584{
 585    CPU_DoubleU farg1, farg2;
 586
 587    farg1.ll = arg1;
 588    farg2.ll = arg2;
 589
 590    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
 591                 float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
 592        /* Magnitude subtraction of infinities */
 593        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
 594    } else {
 595        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
 596                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
 597            /* sNaN addition */
 598            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
 599        }
 600        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
 601    }
 602
 603    return farg1.ll;
 604}
 605
 606/* fsub - fsub. */
 607uint64_t helper_fsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
 608{
 609    CPU_DoubleU farg1, farg2;
 610
 611    farg1.ll = arg1;
 612    farg2.ll = arg2;
 613
 614    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
 615                 float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
 616        /* Magnitude subtraction of infinities */
 617        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
 618    } else {
 619        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
 620                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
 621            /* sNaN subtraction */
 622            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
 623        }
 624        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
 625    }
 626
 627    return farg1.ll;
 628}
 629
 630/* fmul - fmul. */
 631uint64_t helper_fmul(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
 632{
 633    CPU_DoubleU farg1, farg2;
 634
 635    farg1.ll = arg1;
 636    farg2.ll = arg2;
 637
 638    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
 639                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
 640        /* Multiplication of zero by infinity */
 641        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
 642    } else {
 643        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
 644                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
 645            /* sNaN multiplication */
 646            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
 647        }
 648        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
 649    }
 650
 651    return farg1.ll;
 652}
 653
 654/* fdiv - fdiv. */
 655uint64_t helper_fdiv(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
 656{
 657    CPU_DoubleU farg1, farg2;
 658
 659    farg1.ll = arg1;
 660    farg2.ll = arg2;
 661
 662    if (unlikely(float64_is_infinity(farg1.d) &&
 663                 float64_is_infinity(farg2.d))) {
 664        /* Division of infinity by infinity */
 665        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
 666    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
 667        /* Division of zero by zero */
 668        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
 669    } else {
 670        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
 671                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
 672            /* sNaN division */
 673            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
 674        }
 675        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
 676    }
 677
 678    return farg1.ll;
 679}
 680
 681
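/*
 * fcti* conversions to 32/64-bit integers: 'cvt' names the softfloat
 * conversion routine and 'nanval' is the value substituted (with VXCVI,
 * plus VXSNAN for signaling NaNs) when the source is a NaN.
 */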
 682#define FPU_FCTI(op, cvt, nanval)                                      \
 683uint64_t helper_##op(CPUPPCState *env, uint64_t arg)                   \
 684{                                                                      \
 685    CPU_DoubleU farg;                                                  \
 686                                                                       \
 687    farg.ll = arg;                                                     \
 688    farg.ll = float64_to_##cvt(farg.d, &env->fp_status);               \
 689                                                                       \
 690    if (unlikely(env->fp_status.float_exception_flags)) {              \
 691        if (float64_is_any_nan(arg)) {                                 \
 692            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);      \
 693            if (float64_is_signaling_nan(arg, &env->fp_status)) {      \
 694                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); \
 695            }                                                          \
 696            farg.ll = nanval;                                          \
 697        } else if (env->fp_status.float_exception_flags &              \
 698                   float_flag_invalid) {                               \
 699            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);      \
 700        }                                                              \
 701        float_check_status(env);                                       \
 702    }                                                                  \
 703    return farg.ll;                                                    \
 704 }
 705
 706FPU_FCTI(fctiw, int32, 0x80000000U)
 707FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
 708FPU_FCTI(fctiwu, uint32, 0x00000000U)
 709FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
 710FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
 711FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
 712FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
 713FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
 714
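/*
 * fcfid* conversions from 64-bit integers: the single-precision forms
 * (is_single) round through float32 first so the result is correctly
 * rounded to single precision before being widened back to float64.
 */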
 715#define FPU_FCFI(op, cvtr, is_single)                      \
 716uint64_t helper_##op(CPUPPCState *env, uint64_t arg)       \
 717{                                                          \
 718    CPU_DoubleU farg;                                      \
 719                                                           \
 720    if (is_single) {                                       \
 721        float32 tmp = cvtr(arg, &env->fp_status);          \
 722        farg.d = float32_to_float64(tmp, &env->fp_status); \
 723    } else {                                               \
 724        farg.d = cvtr(arg, &env->fp_status);               \
 725    }                                                      \
 726    float_check_status(env);                               \
 727    return farg.ll;                                        \
 728}
 729
 730FPU_FCFI(fcfid, int64_to_float64, 0)
 731FPU_FCFI(fcfids, int64_to_float32, 1)
 732FPU_FCFI(fcfidu, uint64_to_float64, 0)
 733FPU_FCFI(fcfidus, uint64_to_float32, 1)
 734
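/*
 * Common helper for the fri* round-to-integer instructions: round with an
 * explicit rounding mode, restore the mode selected by FPSCR[RN], and drop
 * a newly raised inexact flag because fri* must not set FPSCR[XX].
 */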
 735static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
 736                              int rounding_mode)
 737{
 738    CPU_DoubleU farg;
 739
 740    farg.ll = arg;
 741
 742    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
 743        /* sNaN round */
 744        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
 745        farg.ll = arg | 0x0008000000000000ULL;
 746    } else {
 747        int inexact = get_float_exception_flags(&env->fp_status) &
 748                      float_flag_inexact;
 749        set_float_rounding_mode(rounding_mode, &env->fp_status);
 750        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
 751        /* Restore rounding mode from FPSCR */
 752        fpscr_set_rounding_mode(env);
 753
 754        /* fri* does not set FPSCR[XX] */
 755        if (!inexact) {
 756            env->fp_status.float_exception_flags &= ~float_flag_inexact;
 757        }
 758    }
 759    float_check_status(env);
 760    return farg.ll;
 761}
 762
 763uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
 764{
 765    return do_fri(env, arg, float_round_ties_away);
 766}
 767
 768uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
 769{
 770    return do_fri(env, arg, float_round_to_zero);
 771}
 772
 773uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
 774{
 775    return do_fri(env, arg, float_round_up);
 776}
 777
 778uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
 779{
 780    return do_fri(env, arg, float_round_down);
 781}
 782
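/*
 * Invalid-operation analysis for fused multiply-add: raise VXSNAN for
 * signaling-NaN operands, VXIMZ for infinity * zero, and VXISI when the
 * product and the (possibly negated) addend are infinities of opposite sign.
 */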
 783#define FPU_MADDSUB_UPDATE(NAME, TP)                                    \
 784static void NAME(CPUPPCState *env, TP arg1, TP arg2, TP arg3,           \
 785                 unsigned int madd_flags)                               \
 786{                                                                       \
 787    if (TP##_is_signaling_nan(arg1, &env->fp_status) ||                 \
 788        TP##_is_signaling_nan(arg2, &env->fp_status) ||                 \
 789        TP##_is_signaling_nan(arg3, &env->fp_status)) {                 \
 790        /* sNaN operation */                                            \
 791        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);          \
 792    }                                                                   \
 793    if ((TP##_is_infinity(arg1) && TP##_is_zero(arg2)) ||               \
 794        (TP##_is_zero(arg1) && TP##_is_infinity(arg2))) {               \
 795        /* Multiplication of zero by infinity */                        \
 796        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);           \
 797    }                                                                   \
 798    if ((TP##_is_infinity(arg1) || TP##_is_infinity(arg2)) &&           \
 799        TP##_is_infinity(arg3)) {                                       \
 800        uint8_t aSign, bSign, cSign;                                    \
 801                                                                        \
 802        aSign = TP##_is_neg(arg1);                                      \
 803        bSign = TP##_is_neg(arg2);                                      \
 804        cSign = TP##_is_neg(arg3);                                      \
 805        if (madd_flags & float_muladd_negate_c) {                       \
 806            cSign ^= 1;                                                 \
 807        }                                                               \
 808        if (aSign ^ bSign ^ cSign) {                                    \
 809            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);       \
 810        }                                                               \
 811    }                                                                   \
 812}
 813FPU_MADDSUB_UPDATE(float32_maddsub_update_excp, float32)
 814FPU_MADDSUB_UPDATE(float64_maddsub_update_excp, float64)
 815
 816#define FPU_FMADD(op, madd_flags)                                       \
 817uint64_t helper_##op(CPUPPCState *env, uint64_t arg1,                   \
 818                     uint64_t arg2, uint64_t arg3)                      \
 819{                                                                       \
 820    uint32_t flags;                                                     \
 821    float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags,          \
 822                                 &env->fp_status);                      \
 823    flags = get_float_exception_flags(&env->fp_status);                 \
 824    if (flags) {                                                        \
 825        if (flags & float_flag_invalid) {                               \
 826            float64_maddsub_update_excp(env, arg1, arg2, arg3,          \
 827                                        madd_flags);                    \
 828        }                                                               \
 829        float_check_status(env);                                        \
 830    }                                                                   \
 831    return ret;                                                         \
 832}
 833
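/*
 * Flag combinations handed to float64_muladd() below; with operands
 * (arg1, arg2, arg3) they yield:
 *   fmadd :   arg1 * arg2 + arg3
 *   fmsub :   arg1 * arg2 - arg3      (negate addend)
 *   fnmadd: -(arg1 * arg2 + arg3)     (negate result)
 *   fnmsub: -(arg1 * arg2 - arg3)     (negate addend and result)
 */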
 834#define MADD_FLGS 0
 835#define MSUB_FLGS float_muladd_negate_c
 836#define NMADD_FLGS float_muladd_negate_result
 837#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)
 838
 839FPU_FMADD(fmadd, MADD_FLGS)
 840FPU_FMADD(fnmadd, NMADD_FLGS)
 841FPU_FMADD(fmsub, MSUB_FLGS)
 842FPU_FMADD(fnmsub, NMSUB_FLGS)
 843
 844/* frsp - frsp. */
 845uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
 846{
 847    CPU_DoubleU farg;
 848    float32 f32;
 849
 850    farg.ll = arg;
 851
 852    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
 853        /* sNaN rounding to single precision */
 854        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
 855    }
 856    f32 = float64_to_float32(farg.d, &env->fp_status);
 857    farg.d = float32_to_float64(f32, &env->fp_status);
 858
 859    return farg.ll;
 860}
 861
 862/* fsqrt - fsqrt. */
 863uint64_t helper_fsqrt(CPUPPCState *env, uint64_t arg)
 864{
 865    CPU_DoubleU farg;
 866
 867    farg.ll = arg;
 868
 869    if (unlikely(float64_is_any_nan(farg.d))) {
 870        if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
 871            /* sNaN square root */
 872            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
 873            farg.ll = float64_snan_to_qnan(farg.ll);
 874        }
 875    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
 876        /* Square root of a negative nonzero number */
 877        farg.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
 878    } else {
 879        farg.d = float64_sqrt(farg.d, &env->fp_status);
 880    }
 881    return farg.ll;
 882}
 883
 884/* fre - fre. */
 885uint64_t helper_fre(CPUPPCState *env, uint64_t arg)
 886{
 887    CPU_DoubleU farg;
 888
 889    farg.ll = arg;
 890
 891    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
 892        /* sNaN reciprocal */
 893        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
 894    }
 895    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
 896    return farg.ll;
 897}
 898
 899/* fres - fres. */
 900uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
 901{
 902    CPU_DoubleU farg;
 903    float32 f32;
 904
 905    farg.ll = arg;
 906
 907    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
 908        /* sNaN reciprocal */
 909        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
 910    }
 911    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
 912    f32 = float64_to_float32(farg.d, &env->fp_status);
 913    farg.d = float32_to_float64(f32, &env->fp_status);
 914
 915    return farg.ll;
 916}
 917
 918/* frsqrte  - frsqrte. */
 919uint64_t helper_frsqrte(CPUPPCState *env, uint64_t arg)
 920{
 921    CPU_DoubleU farg;
 922
 923    farg.ll = arg;
 924
 925    if (unlikely(float64_is_any_nan(farg.d))) {
 926        if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
 927            /* sNaN reciprocal square root */
 928            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
 929            farg.ll = float64_snan_to_qnan(farg.ll);
 930        }
 931    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
 932        /* Reciprocal square root of a negative nonzero number */
 933        farg.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
 934    } else {
 935        farg.d = float64_sqrt(farg.d, &env->fp_status);
 936        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
 937    }
 938
 939    return farg.ll;
 940}
 941
 942/* fsel - fsel. */
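/* Select arg2 when arg1 is greater than or equal to zero (-0 counts as
 * zero) and is not a NaN; otherwise select arg3. */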
 943uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
 944                     uint64_t arg3)
 945{
 946    CPU_DoubleU farg1;
 947
 948    farg1.ll = arg1;
 949
 950    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
 951        !float64_is_any_nan(farg1.d)) {
 952        return arg2;
 953    } else {
 954        return arg3;
 955    }
 956}
 957
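/*
 * ftdiv/ftsqrt test helpers: fe_flag (0x2) marks operands that require the
 * full IEEE path (NaNs, extreme exponents), fg_flag (0x4) marks an infinite,
 * zero or denormal operand, and bit 0x8 is always set in the returned CR
 * field value.
 */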
 958uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
 959{
 960    int fe_flag = 0;
 961    int fg_flag = 0;
 962
 963    if (unlikely(float64_is_infinity(fra) ||
 964                 float64_is_infinity(frb) ||
 965                 float64_is_zero(frb))) {
 966        fe_flag = 1;
 967        fg_flag = 1;
 968    } else {
 969        int e_a = ppc_float64_get_unbiased_exp(fra);
 970        int e_b = ppc_float64_get_unbiased_exp(frb);
 971
 972        if (unlikely(float64_is_any_nan(fra) ||
 973                     float64_is_any_nan(frb))) {
 974            fe_flag = 1;
 975        } else if ((e_b <= -1022) || (e_b >= 1021)) {
 976            fe_flag = 1;
 977        } else if (!float64_is_zero(fra) &&
 978                   (((e_a - e_b) >= 1023) ||
 979                    ((e_a - e_b) <= -1021) ||
 980                    (e_a <= -970))) {
 981            fe_flag = 1;
 982        }
 983
 984        if (unlikely(float64_is_zero_or_denormal(frb))) {
 985            /* XB is not zero because of the above check and */
 986            /* so must be denormalized.                      */
 987            fg_flag = 1;
 988        }
 989    }
 990
 991    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
 992}
 993
 994uint32_t helper_ftsqrt(uint64_t frb)
 995{
 996    int fe_flag = 0;
 997    int fg_flag = 0;
 998
 999    if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
1000        fe_flag = 1;
1001        fg_flag = 1;
1002    } else {
1003        int e_b = ppc_float64_get_unbiased_exp(frb);
1004
1005        if (unlikely(float64_is_any_nan(frb))) {
1006            fe_flag = 1;
1007        } else if (unlikely(float64_is_zero(frb))) {
1008            fe_flag = 1;
1009        } else if (unlikely(float64_is_neg(frb))) {
1010            fe_flag = 1;
1011        } else if (!float64_is_zero(frb) && (e_b <= (-1022+52))) {
1012            fe_flag = 1;
1013        }
1014
1015        if (unlikely(float64_is_zero_or_denormal(frb))) {
1016            /* XB is not zero because of the above check and */
1017            /* therefore must be denormalized.               */
1018            fg_flag = 1;
1019        }
1020    }
1021
1022    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
1023}
1024
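/*
 * fcmpu/fcmpo produce a 4-bit condition value: 0x8 less than, 0x4 greater
 * than, 0x2 equal, 0x1 unordered (at least one NaN).  It is written both to
 * FPSCR[FPCC] and to CR[crfD]; fcmpo additionally raises VXVC for NaN
 * operands.
 */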
1025void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
1026                  uint32_t crfD)
1027{
1028    CPU_DoubleU farg1, farg2;
1029    uint32_t ret = 0;
1030
1031    farg1.ll = arg1;
1032    farg2.ll = arg2;
1033
1034    if (unlikely(float64_is_any_nan(farg1.d) ||
1035                 float64_is_any_nan(farg2.d))) {
1036        ret = 0x01UL;
1037    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1038        ret = 0x08UL;
1039    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1040        ret = 0x04UL;
1041    } else {
1042        ret = 0x02UL;
1043    }
1044
1045    env->fpscr &= ~(0x0F << FPSCR_FPRF);
1046    env->fpscr |= ret << FPSCR_FPRF;
1047    env->crf[crfD] = ret;
1048    if (unlikely(ret == 0x01UL
1049                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
1050                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
1051        /* sNaN comparison */
1052        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
1053    }
1054}
1055
1056void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
1057                  uint32_t crfD)
1058{
1059    CPU_DoubleU farg1, farg2;
1060    uint32_t ret = 0;
1061
1062    farg1.ll = arg1;
1063    farg2.ll = arg2;
1064
1065    if (unlikely(float64_is_any_nan(farg1.d) ||
1066                 float64_is_any_nan(farg2.d))) {
1067        ret = 0x01UL;
1068    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1069        ret = 0x08UL;
1070    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1071        ret = 0x04UL;
1072    } else {
1073        ret = 0x02UL;
1074    }
1075
1076    env->fpscr &= ~(0x0F << FPSCR_FPRF);
1077    env->fpscr |= ret << FPSCR_FPRF;
1078    env->crf[crfD] = ret;
1079    if (unlikely(ret == 0x01UL)) {
1080        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
1081            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
1082            /* sNaN comparison */
1083            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
1084                                  POWERPC_EXCP_FP_VXVC, 1);
1085        } else {
1086            /* qNaN comparison */
1087            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 1);
1088        }
1089    }
1090}
1091
1092/* Single-precision floating-point conversions */
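/*
 * The SPE single-precision helpers below (efs* and the evfs* vector forms)
 * use env->vec_status and intentionally deviate from IEEE 754: NaN inputs
 * to the float-to-integer conversions simply return 0.  The fractional
 * (fixed-point) variants scale by 2^32.
 */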
1093static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
1094{
1095    CPU_FloatU u;
1096
1097    u.f = int32_to_float32(val, &env->vec_status);
1098
1099    return u.l;
1100}
1101
1102static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
1103{
1104    CPU_FloatU u;
1105
1106    u.f = uint32_to_float32(val, &env->vec_status);
1107
1108    return u.l;
1109}
1110
1111static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
1112{
1113    CPU_FloatU u;
1114
1115    u.l = val;
1116    /* NaNs are not treated the way IEEE 754 specifies */
1117    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1118        return 0;
1119    }
1120
1121    return float32_to_int32(u.f, &env->vec_status);
1122}
1123
1124static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
1125{
1126    CPU_FloatU u;
1127
1128    u.l = val;
1129    /* NaNs are not treated the way IEEE 754 specifies */
1130    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1131        return 0;
1132    }
1133
1134    return float32_to_uint32(u.f, &env->vec_status);
1135}
1136
1137static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
1138{
1139    CPU_FloatU u;
1140
1141    u.l = val;
1142    /* NaNs are not treated the way IEEE 754 specifies */
1143    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1144        return 0;
1145    }
1146
1147    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
1148}
1149
1150static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
1151{
1152    CPU_FloatU u;
1153
1154    u.l = val;
1155    /* NaNs are not treated the way IEEE 754 specifies */
1156    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1157        return 0;
1158    }
1159
1160    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
1161}
1162
1163static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
1164{
1165    CPU_FloatU u;
1166    float32 tmp;
1167
1168    u.f = int32_to_float32(val, &env->vec_status);
1169    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
1170    u.f = float32_div(u.f, tmp, &env->vec_status);
1171
1172    return u.l;
1173}
1174
1175static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
1176{
1177    CPU_FloatU u;
1178    float32 tmp;
1179
1180    u.f = uint32_to_float32(val, &env->vec_status);
1181    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
1182    u.f = float32_div(u.f, tmp, &env->vec_status);
1183
1184    return u.l;
1185}
1186
1187static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
1188{
1189    CPU_FloatU u;
1190    float32 tmp;
1191
1192    u.l = val;
1193    /* NaNs are not treated the way IEEE 754 specifies */
1194    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1195        return 0;
1196    }
1197    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
1198    u.f = float32_mul(u.f, tmp, &env->vec_status);
1199
1200    return float32_to_int32(u.f, &env->vec_status);
1201}
1202
1203static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
1204{
1205    CPU_FloatU u;
1206    float32 tmp;
1207
1208    u.l = val;
1209    /* NaNs are not treated the way IEEE 754 specifies */
1210    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1211        return 0;
1212    }
1213    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
1214    u.f = float32_mul(u.f, tmp, &env->vec_status);
1215
1216    return float32_to_uint32(u.f, &env->vec_status);
1217}
1218
1219#define HELPER_SPE_SINGLE_CONV(name)                              \
1220    uint32_t helper_e##name(CPUPPCState *env, uint32_t val)       \
1221    {                                                             \
1222        return e##name(env, val);                                 \
1223    }
1224/* efscfsi */
1225HELPER_SPE_SINGLE_CONV(fscfsi);
1226/* efscfui */
1227HELPER_SPE_SINGLE_CONV(fscfui);
1228/* efscfuf */
1229HELPER_SPE_SINGLE_CONV(fscfuf);
1230/* efscfsf */
1231HELPER_SPE_SINGLE_CONV(fscfsf);
1232/* efsctsi */
1233HELPER_SPE_SINGLE_CONV(fsctsi);
1234/* efsctui */
1235HELPER_SPE_SINGLE_CONV(fsctui);
1236/* efsctsiz */
1237HELPER_SPE_SINGLE_CONV(fsctsiz);
1238/* efsctuiz */
1239HELPER_SPE_SINGLE_CONV(fsctuiz);
1240/* efsctsf */
1241HELPER_SPE_SINGLE_CONV(fsctsf);
1242/* efsctuf */
1243HELPER_SPE_SINGLE_CONV(fsctuf);
1244
1245#define HELPER_SPE_VECTOR_CONV(name)                            \
1246    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val)    \
1247    {                                                           \
1248        return ((uint64_t)e##name(env, val >> 32) << 32) |      \
1249            (uint64_t)e##name(env, val);                        \
1250    }
1251/* evfscfsi */
1252HELPER_SPE_VECTOR_CONV(fscfsi);
1253/* evfscfui */
1254HELPER_SPE_VECTOR_CONV(fscfui);
1255/* evfscfuf */
1256HELPER_SPE_VECTOR_CONV(fscfuf);
1257/* evfscfsf */
1258HELPER_SPE_VECTOR_CONV(fscfsf);
1259/* evfsctsi */
1260HELPER_SPE_VECTOR_CONV(fsctsi);
1261/* evfsctui */
1262HELPER_SPE_VECTOR_CONV(fsctui);
1263/* evfsctsiz */
1264HELPER_SPE_VECTOR_CONV(fsctsiz);
1265/* evfsctuiz */
1266HELPER_SPE_VECTOR_CONV(fsctuiz);
1267/* evfsctsf */
1268HELPER_SPE_VECTOR_CONV(fsctsf);
1269/* evfsctuf */
1270HELPER_SPE_VECTOR_CONV(fsctuf);
1271
1272/* Single-precision floating-point arithmetic */
1273static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
1274{
1275    CPU_FloatU u1, u2;
1276
1277    u1.l = op1;
1278    u2.l = op2;
1279    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
1280    return u1.l;
1281}
1282
1283static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
1284{
1285    CPU_FloatU u1, u2;
1286
1287    u1.l = op1;
1288    u2.l = op2;
1289    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
1290    return u1.l;
1291}
1292
1293static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
1294{
1295    CPU_FloatU u1, u2;
1296
1297    u1.l = op1;
1298    u2.l = op2;
1299    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
1300    return u1.l;
1301}
1302
1303static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
1304{
1305    CPU_FloatU u1, u2;
1306
1307    u1.l = op1;
1308    u2.l = op2;
1309    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
1310    return u1.l;
1311}
1312
1313#define HELPER_SPE_SINGLE_ARITH(name)                                   \
1314    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
1315    {                                                                   \
1316        return e##name(env, op1, op2);                                  \
1317    }
1318/* efsadd */
1319HELPER_SPE_SINGLE_ARITH(fsadd);
1320/* efssub */
1321HELPER_SPE_SINGLE_ARITH(fssub);
1322/* efsmul */
1323HELPER_SPE_SINGLE_ARITH(fsmul);
1324/* efsdiv */
1325HELPER_SPE_SINGLE_ARITH(fsdiv);
1326
1327#define HELPER_SPE_VECTOR_ARITH(name)                                   \
1328    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
1329    {                                                                   \
1330        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |   \
1331            (uint64_t)e##name(env, op1, op2);                           \
1332    }
1333/* evfsadd */
1334HELPER_SPE_VECTOR_ARITH(fsadd);
1335/* evfssub */
1336HELPER_SPE_VECTOR_ARITH(fssub);
1337/* evfsmul */
1338HELPER_SPE_VECTOR_ARITH(fsmul);
1339/* evfsdiv */
1340HELPER_SPE_VECTOR_ARITH(fsdiv);
1341
1342/* Single-precision floating-point comparisons */
1343static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1344{
1345    CPU_FloatU u1, u2;
1346
1347    u1.l = op1;
1348    u2.l = op2;
1349    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
1350}
1351
1352static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1353{
1354    CPU_FloatU u1, u2;
1355
1356    u1.l = op1;
1357    u2.l = op2;
1358    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
1359}
1360
1361static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
1362{
1363    CPU_FloatU u1, u2;
1364
1365    u1.l = op1;
1366    u2.l = op2;
1367    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
1368}
1369
1370static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1371{
1372    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
1373    return efscmplt(env, op1, op2);
1374}
1375
1376static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1377{
1378    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
1379    return efscmpgt(env, op1, op2);
1380}
1381
1382static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
1383{
1384    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
1385    return efscmpeq(env, op1, op2);
1386}
1387
1388#define HELPER_SINGLE_SPE_CMP(name)                                     \
1389    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
1390    {                                                                   \
1391        return e##name(env, op1, op2);                                  \
1392    }
1393/* efststlt */
1394HELPER_SINGLE_SPE_CMP(fststlt);
1395/* efststgt */
1396HELPER_SINGLE_SPE_CMP(fststgt);
1397/* efststeq */
1398HELPER_SINGLE_SPE_CMP(fststeq);
1399/* efscmplt */
1400HELPER_SINGLE_SPE_CMP(fscmplt);
1401/* efscmpgt */
1402HELPER_SINGLE_SPE_CMP(fscmpgt);
1403/* efscmpeq */
1404HELPER_SINGLE_SPE_CMP(fscmpeq);
1405
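/* Pack two per-element truth values into a CR field: bit 3 = upper element,
 * bit 2 = lower element, bit 1 = OR of both, bit 0 = AND of both. */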
1406static inline uint32_t evcmp_merge(int t0, int t1)
1407{
1408    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
1409}
1410
1411#define HELPER_VECTOR_SPE_CMP(name)                                     \
1412    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
1413    {                                                                   \
1414        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),          \
1415                           e##name(env, op1, op2));                     \
1416    }
1417/* evfststlt */
1418HELPER_VECTOR_SPE_CMP(fststlt);
1419/* evfststgt */
1420HELPER_VECTOR_SPE_CMP(fststgt);
1421/* evfststeq */
1422HELPER_VECTOR_SPE_CMP(fststeq);
1423/* evfscmplt */
1424HELPER_VECTOR_SPE_CMP(fscmplt);
1425/* evfscmpgt */
1426HELPER_VECTOR_SPE_CMP(fscmpgt);
1427/* evfscmpeq */
1428HELPER_VECTOR_SPE_CMP(fscmpeq);
1429
1430/* Double-precision floating-point conversion */
1431uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
1432{
1433    CPU_DoubleU u;
1434
1435    u.d = int32_to_float64(val, &env->vec_status);
1436
1437    return u.ll;
1438}
1439
1440uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
1441{
1442    CPU_DoubleU u;
1443
1444    u.d = int64_to_float64(val, &env->vec_status);
1445
1446    return u.ll;
1447}
1448
1449uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
1450{
1451    CPU_DoubleU u;
1452
1453    u.d = uint32_to_float64(val, &env->vec_status);
1454
1455    return u.ll;
1456}
1457
1458uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
1459{
1460    CPU_DoubleU u;
1461
1462    u.d = uint64_to_float64(val, &env->vec_status);
1463
1464    return u.ll;
1465}
1466
1467uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
1468{
1469    CPU_DoubleU u;
1470
1471    u.ll = val;
1472    /* NaNs are not treated the way IEEE 754 specifies */
1473    if (unlikely(float64_is_any_nan(u.d))) {
1474        return 0;
1475    }
1476
1477    return float64_to_int32(u.d, &env->vec_status);
1478}
1479
1480uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
1481{
1482    CPU_DoubleU u;
1483
1484    u.ll = val;
1485    /* NaNs are not treated the way IEEE 754 specifies */
1486    if (unlikely(float64_is_any_nan(u.d))) {
1487        return 0;
1488    }
1489
1490    return float64_to_uint32(u.d, &env->vec_status);
1491}
1492
1493uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
1494{
1495    CPU_DoubleU u;
1496
1497    u.ll = val;
1498    /* NaNs are not treated the way IEEE 754 specifies */
1499    if (unlikely(float64_is_any_nan(u.d))) {
1500        return 0;
1501    }
1502
1503    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
1504}
1505
1506uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
1507{
1508    CPU_DoubleU u;
1509
1510    u.ll = val;
1511    /* NaNs are not treated the way IEEE 754 specifies */
1512    if (unlikely(float64_is_any_nan(u.d))) {
1513        return 0;
1514    }
1515
1516    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
1517}
1518
1519uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
1520{
1521    CPU_DoubleU u;
1522
1523    u.ll = val;
1524    /* NaNs are not treated the way IEEE 754 specifies */
1525    if (unlikely(float64_is_any_nan(u.d))) {
1526        return 0;
1527    }
1528
1529    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
1530}
1531
1532uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
1533{
1534    CPU_DoubleU u;
1535
1536    u.ll = val;
1537    /* NaNs are not treated the way IEEE 754 specifies */
1538    if (unlikely(float64_is_any_nan(u.d))) {
1539        return 0;
1540    }
1541
1542    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
1543}
1544
1545uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
1546{
1547    CPU_DoubleU u;
1548    float64 tmp;
1549
1550    u.d = int32_to_float64(val, &env->vec_status);
1551    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
1552    u.d = float64_div(u.d, tmp, &env->vec_status);
1553
1554    return u.ll;
1555}
1556
1557uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
1558{
1559    CPU_DoubleU u;
1560    float64 tmp;
1561
1562    u.d = uint32_to_float64(val, &env->vec_status);
1563    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
1564    u.d = float64_div(u.d, tmp, &env->vec_status);
1565
1566    return u.ll;
1567}
1568
1569uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
1570{
1571    CPU_DoubleU u;
1572    float64 tmp;
1573
1574    u.ll = val;
1575    /* NaNs are not treated the way IEEE 754 specifies */
1576    if (unlikely(float64_is_any_nan(u.d))) {
1577        return 0;
1578    }
1579    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
1580    u.d = float64_mul(u.d, tmp, &env->vec_status);
1581
1582    return float64_to_int32(u.d, &env->vec_status);
1583}
1584
1585uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
1586{
1587    CPU_DoubleU u;
1588    float64 tmp;
1589
1590    u.ll = val;
1591    /* NaNs are not treated the way IEEE 754 specifies */
1592    if (unlikely(float64_is_any_nan(u.d))) {
1593        return 0;
1594    }
1595    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
1596    u.d = float64_mul(u.d, tmp, &env->vec_status);
1597
1598    return float64_to_uint32(u.d, &env->vec_status);
1599}
1600
1601uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
1602{
1603    CPU_DoubleU u1;
1604    CPU_FloatU u2;
1605
1606    u1.ll = val;
1607    u2.f = float64_to_float32(u1.d, &env->vec_status);
1608
1609    return u2.l;
1610}
1611
1612uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
1613{
1614    CPU_DoubleU u2;
1615    CPU_FloatU u1;
1616
1617    u1.l = val;
1618    u2.d = float32_to_float64(u1.f, &env->vec_status);
1619
1620    return u2.ll;
1621}
1622
1623/* Double precision floating point arithmetic */
1624uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
1625{
1626    CPU_DoubleU u1, u2;
1627
1628    u1.ll = op1;
1629    u2.ll = op2;
1630    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
1631    return u1.ll;
1632}
1633
1634uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
1635{
1636    CPU_DoubleU u1, u2;
1637
1638    u1.ll = op1;
1639    u2.ll = op2;
1640    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
1641    return u1.ll;
1642}
1643
1644uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
1645{
1646    CPU_DoubleU u1, u2;
1647
1648    u1.ll = op1;
1649    u2.ll = op2;
1650    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
1651    return u1.ll;
1652}
1653
1654uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
1655{
1656    CPU_DoubleU u1, u2;
1657
1658    u1.ll = op1;
1659    u2.ll = op2;
1660    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
1661    return u1.ll;
1662}
1663
1664/* Double precision floating point comparison helpers */
1665uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1666{
1667    CPU_DoubleU u1, u2;
1668
1669    u1.ll = op1;
1670    u2.ll = op2;
1671    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
1672}
1673
1674uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1675{
1676    CPU_DoubleU u1, u2;
1677
1678    u1.ll = op1;
1679    u2.ll = op2;
1680    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
1681}
1682
1683uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
1684{
1685    CPU_DoubleU u1, u2;
1686
1687    u1.ll = op1;
1688    u2.ll = op2;
1689    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
1690}
1691
1692uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1693{
1694    /* XXX: TODO: test special values (NaN, infinities, ...) */
1695    return helper_efdtstlt(env, op1, op2);
1696}
1697
1698uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1699{
1700    /* XXX: TODO: test special values (NaN, infinities, ...) */
1701    return helper_efdtstgt(env, op1, op2);
1702}
1703
1704uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
1705{
1706    /* XXX: TODO: test special values (NaN, infinities, ...) */
1707    return helper_efdtsteq(env, op1, op2);
1708}
1709
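/*
 * Identity stub: allows macros that paste a <type>_to_float64 "conversion"
 * to also work when the type is already float64.
 */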
1710#define float64_to_float64(x, env) x
1711
1712
1713/* VSX_ADD_SUB - VSX floating point add/subtract
1714 *   name  - instruction mnemonic
1715 *   op    - operation (add or sub)
1716 *   nels  - number of elements (1, 2 or 4)
1717 *   tp    - type (float32 or float64)
1718 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1719 *   sfprf - set FPRF
 *   r2sp  - round the intermediate result to single precision
1720 */
1721#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp)                    \
1722void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
1723{                                                                            \
1724    ppc_vsr_t xt, xa, xb;                                                    \
1725    int i;                                                                   \
1726                                                                             \
1727    getVSR(xA(opcode), &xa, env);                                            \
1728    getVSR(xB(opcode), &xb, env);                                            \
1729    getVSR(xT(opcode), &xt, env);                                            \
1730    helper_reset_fpstatus(env);                                              \
1731                                                                             \
1732    for (i = 0; i < nels; i++) {                                             \
1733        float_status tstat = env->fp_status;                                 \
1734        set_float_exception_flags(0, &tstat);                                \
1735        xt.fld = tp##_##op(xa.fld, xb.fld, &tstat);                          \
1736        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
1737                                                                             \
1738        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
1739            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {      \
1740                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf);    \
1741            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
1742                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
1743                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
1744            }                                                                \
1745        }                                                                    \
1746                                                                             \
1747        if (r2sp) {                                                          \
1748            xt.fld = helper_frsp(env, xt.fld);                               \
1749        }                                                                    \
1750                                                                             \
1751        if (sfprf) {                                                         \
1752            helper_compute_fprf_float64(env, xt.fld);                        \
1753        }                                                                    \
1754    }                                                                        \
1755    putVSR(xT(opcode), &xt, env);                                            \
1756    float_check_status(env);                                                 \
1757}
1758
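/*
 * xsadddp, for example, expands to a helper that adds VsrD(0) of xA and
 * xB in double precision and reports FPRF; only the fields named by "fld"
 * are written, so the scalar forms leave the other doubleword of xT as it
 * was read by getVSR().
 */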
1759VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
1760VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
1761VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
1762VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
1763VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
1764VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
1765VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
1766VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
1767
1768void helper_xsaddqp(CPUPPCState *env, uint32_t opcode)
1769{
1770    ppc_vsr_t xt, xa, xb;
1771    float_status tstat;
1772
1773    getVSR(rA(opcode) + 32, &xa, env);
1774    getVSR(rB(opcode) + 32, &xb, env);
1775    getVSR(rD(opcode) + 32, &xt, env);
1776    helper_reset_fpstatus(env);
1777
1778    tstat = env->fp_status;
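    /*
     * The bit in the Rc position is the instruction's RO field: the "o"
     * form (xsaddqpo) rounds to odd rather than using the FPSCR mode.
     */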
1779    if (unlikely(Rc(opcode) != 0)) {
1780        tstat.float_rounding_mode = float_round_to_odd;
1781    }
1782
1783    set_float_exception_flags(0, &tstat);
1784    xt.f128 = float128_add(xa.f128, xb.f128, &tstat);
1785    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
1786
1787    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
1788        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
1789            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
1790        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
1791                   float128_is_signaling_nan(xb.f128, &tstat)) {
1792            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
1793        }
1794    }
1795
1796    helper_compute_fprf_float128(env, xt.f128);
1797
1798    putVSR(rD(opcode) + 32, &xt, env);
1799    float_check_status(env);
1800}
1801
1802/* VSX_MUL - VSX floating point multiply
1803 *   op    - instruction mnemonic
1804 *   nels  - number of elements (1, 2 or 4)
1805 *   tp    - type (float32 or float64)
1806 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1807 *   sfprf - set FPRF
 *   r2sp  - round the intermediate result to single precision
1808 */
1809#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp)                              \
1810void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
1811{                                                                            \
1812    ppc_vsr_t xt, xa, xb;                                                    \
1813    int i;                                                                   \
1814                                                                             \
1815    getVSR(xA(opcode), &xa, env);                                            \
1816    getVSR(xB(opcode), &xb, env);                                            \
1817    getVSR(xT(opcode), &xt, env);                                            \
1818    helper_reset_fpstatus(env);                                              \
1819                                                                             \
1820    for (i = 0; i < nels; i++) {                                             \
1821        float_status tstat = env->fp_status;                                 \
1822        set_float_exception_flags(0, &tstat);                                \
1823        xt.fld = tp##_mul(xa.fld, xb.fld, &tstat);                           \
1824        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
1825                                                                             \
1826        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
1827            if ((tp##_is_infinity(xa.fld) && tp##_is_zero(xb.fld)) ||        \
1828                (tp##_is_infinity(xb.fld) && tp##_is_zero(xa.fld))) {        \
1829                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, sfprf);    \
1830            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
1831                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
1832                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
1833            }                                                                \
1834        }                                                                    \
1835                                                                             \
1836        if (r2sp) {                                                          \
1837            xt.fld = helper_frsp(env, xt.fld);                               \
1838        }                                                                    \
1839                                                                             \
1840        if (sfprf) {                                                         \
1841            helper_compute_fprf_float64(env, xt.fld);                        \
1842        }                                                                    \
1843    }                                                                        \
1844                                                                             \
1845    putVSR(xT(opcode), &xt, env);                                            \
1846    float_check_status(env);                                                 \
1847}
1848
1849VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
1850VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
1851VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
1852VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
1853
1854void helper_xsmulqp(CPUPPCState *env, uint32_t opcode)
1855{
1856    ppc_vsr_t xt, xa, xb;
1857    float_status tstat;
1858
1859    getVSR(rA(opcode) + 32, &xa, env);
1860    getVSR(rB(opcode) + 32, &xb, env);
1861    getVSR(rD(opcode) + 32, &xt, env);
1862
1863    helper_reset_fpstatus(env);
1864    tstat = env->fp_status;
1865    if (unlikely(Rc(opcode) != 0)) {
1866        tstat.float_rounding_mode = float_round_to_odd;
1867    }
1868
1869    set_float_exception_flags(0, &tstat);
1870    xt.f128 = float128_mul(xa.f128, xb.f128, &tstat);
1871    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
1872
1873    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
1874        if ((float128_is_infinity(xa.f128) && float128_is_zero(xb.f128)) ||
1875            (float128_is_infinity(xb.f128) && float128_is_zero(xa.f128))) {
1876            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
1877        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
1878                   float128_is_signaling_nan(xb.f128, &tstat)) {
1879            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
1880        }
1881    }
1882    helper_compute_fprf_float128(env, xt.f128);
1883
1884    putVSR(rD(opcode) + 32, &xt, env);
1885    float_check_status(env);
1886}
1887
1888/* VSX_DIV - VSX floating point divide
1889 *   op    - instruction mnemonic
1890 *   nels  - number of elements (1, 2 or 4)
1891 *   tp    - type (float32 or float64)
1892 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1893 *   sfprf - set FPRF
 *   r2sp  - round the intermediate result to single precision
1894 */
1895#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp)                               \
1896void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
1897{                                                                             \
1898    ppc_vsr_t xt, xa, xb;                                                     \
1899    int i;                                                                    \
1900                                                                              \
1901    getVSR(xA(opcode), &xa, env);                                             \
1902    getVSR(xB(opcode), &xb, env);                                             \
1903    getVSR(xT(opcode), &xt, env);                                             \
1904    helper_reset_fpstatus(env);                                               \
1905                                                                              \
1906    for (i = 0; i < nels; i++) {                                              \
1907        float_status tstat = env->fp_status;                                  \
1908        set_float_exception_flags(0, &tstat);                                 \
1909        xt.fld = tp##_div(xa.fld, xb.fld, &tstat);                            \
1910        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
1911                                                                              \
1912        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
1913            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {       \
1914                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, sfprf);     \
1915            } else if (tp##_is_zero(xa.fld) &&                                \
1916                tp##_is_zero(xb.fld)) {                                       \
1917                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, sfprf);     \
1918            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||               \
1919                tp##_is_signaling_nan(xb.fld, &tstat)) {                      \
1920                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);    \
1921            }                                                                 \
1922        }                                                                     \
1923                                                                              \
1924        if (r2sp) {                                                           \
1925            xt.fld = helper_frsp(env, xt.fld);                                \
1926        }                                                                     \
1927                                                                              \
1928        if (sfprf) {                                                          \
1929            helper_compute_fprf_float64(env, xt.fld);                         \
1930        }                                                                     \
1931    }                                                                         \
1932                                                                              \
1933    putVSR(xT(opcode), &xt, env);                                             \
1934    float_check_status(env);                                                  \
1935}
1936
1937VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
1938VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
1939VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
1940VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
1941
1942void helper_xsdivqp(CPUPPCState *env, uint32_t opcode)
1943{
1944    ppc_vsr_t xt, xa, xb;
1945    float_status tstat;
1946
1947    getVSR(rA(opcode) + 32, &xa, env);
1948    getVSR(rB(opcode) + 32, &xb, env);
1949    getVSR(rD(opcode) + 32, &xt, env);
1950
1951    helper_reset_fpstatus(env);
1952    tstat = env->fp_status;
1953    if (unlikely(Rc(opcode) != 0)) {
1954        tstat.float_rounding_mode = float_round_to_odd;
1955    }
1956
1957    set_float_exception_flags(0, &tstat);
1958    xt.f128 = float128_div(xa.f128, xb.f128, &tstat);
1959    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
1960
1961    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
1962        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
1963            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
1964        } else if (float128_is_zero(xa.f128) &&
1965            float128_is_zero(xb.f128)) {
1966            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
1967        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
1968            float128_is_signaling_nan(xb.f128, &tstat)) {
1969            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
1970        }
1971    }
1972
1973    helper_compute_fprf_float128(env, xt.f128);
1974    putVSR(rD(opcode) + 32, &xt, env);
1975    float_check_status(env);
1976}
1977
1978/* VSX_RE  - VSX floating point reciprocal estimate
1979 *   op    - instruction mnemonic
1980 *   nels  - number of elements (1, 2 or 4)
1981 *   tp    - type (float32 or float64)
1982 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1983 *   sfprf - set FPRF
 *   r2sp  - round the intermediate result to single precision
1984 */
1985#define VSX_RE(op, nels, tp, fld, sfprf, r2sp)                                \
1986void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
1987{                                                                             \
1988    ppc_vsr_t xt, xb;                                                         \
1989    int i;                                                                    \
1990                                                                              \
1991    getVSR(xB(opcode), &xb, env);                                             \
1992    getVSR(xT(opcode), &xt, env);                                             \
1993    helper_reset_fpstatus(env);                                               \
1994                                                                              \
1995    for (i = 0; i < nels; i++) {                                              \
1996        if (unlikely(tp##_is_signaling_nan(xb.fld, &env->fp_status))) {       \
1997            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);        \
1998        }                                                                     \
1999        xt.fld = tp##_div(tp##_one, xb.fld, &env->fp_status);                 \
2000                                                                              \
2001        if (r2sp) {                                                           \
2002            xt.fld = helper_frsp(env, xt.fld);                                \
2003        }                                                                     \
2004                                                                              \
2005        if (sfprf) {                                                          \
2006            helper_compute_fprf_float64(env, xt.fld);                         \
2007        }                                                                     \
2008    }                                                                         \
2009                                                                              \
2010    putVSR(xT(opcode), &xt, env);                                             \
2011    float_check_status(env);                                                  \
2012}
2013
2014VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
2015VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
2016VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
2017VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
2018
2019/* VSX_SQRT - VSX floating point square root
2020 *   op    - instruction mnemonic
2021 *   nels  - number of elements (1, 2 or 4)
2022 *   tp    - type (float32 or float64)
2023 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2024 *   sfprf - set FPRF
 *   r2sp  - round the intermediate result to single precision
2025 */
2026#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp)                             \
2027void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
2028{                                                                            \
2029    ppc_vsr_t xt, xb;                                                        \
2030    int i;                                                                   \
2031                                                                             \
2032    getVSR(xB(opcode), &xb, env);                                            \
2033    getVSR(xT(opcode), &xt, env);                                            \
2034    helper_reset_fpstatus(env);                                              \
2035                                                                             \
2036    for (i = 0; i < nels; i++) {                                             \
2037        float_status tstat = env->fp_status;                                 \
2038        set_float_exception_flags(0, &tstat);                                \
2039        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
2040        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
2041                                                                             \
2042        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
2043            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
2044                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
2045            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
2046                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
2047            }                                                                \
2048        }                                                                    \
2049                                                                             \
2050        if (r2sp) {                                                          \
2051            xt.fld = helper_frsp(env, xt.fld);                               \
2052        }                                                                    \
2053                                                                             \
2054        if (sfprf) {                                                         \
2055            helper_compute_fprf_float64(env, xt.fld);                        \
2056        }                                                                    \
2057    }                                                                        \
2058                                                                             \
2059    putVSR(xT(opcode), &xt, env);                                            \
2060    float_check_status(env);                                                 \
2061}
2062
2063VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
2064VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
2065VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
2066VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
2067
2068/* VSX_RSQRTE - VSX floating point reciprocal square root estimate
2069 *   op    - instruction mnemonic
2070 *   nels  - number of elements (1, 2 or 4)
2071 *   tp    - type (float32 or float64)
2072 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2073 *   sfprf - set FPRF
 *   r2sp  - round the intermediate result to single precision
2074 */
2075#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp)                           \
2076void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
2077{                                                                            \
2078    ppc_vsr_t xt, xb;                                                        \
2079    int i;                                                                   \
2080                                                                             \
2081    getVSR(xB(opcode), &xb, env);                                            \
2082    getVSR(xT(opcode), &xt, env);                                            \
2083    helper_reset_fpstatus(env);                                              \
2084                                                                             \
2085    for (i = 0; i < nels; i++) {                                             \
2086        float_status tstat = env->fp_status;                                 \
2087        set_float_exception_flags(0, &tstat);                                \
2088        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
2089        xt.fld = tp##_div(tp##_one, xt.fld, &tstat);                         \
2090        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
2091                                                                             \
2092        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
2093            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
2094                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
2095            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
2096                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
2097            }                                                                \
2098        }                                                                    \
2099                                                                             \
2100        if (r2sp) {                                                          \
2101            xt.fld = helper_frsp(env, xt.fld);                               \
2102        }                                                                    \
2103                                                                             \
2104        if (sfprf) {                                                         \
2105            helper_compute_fprf_float64(env, xt.fld);                        \
2106        }                                                                    \
2107    }                                                                        \
2108                                                                             \
2109    putVSR(xT(opcode), &xt, env);                                            \
2110    float_check_status(env);                                                 \
2111}
2112
2113VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
2114VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
2115VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
2116VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
2117
2118/* VSX_TDIV - VSX floating point test for divide
2119 *   op    - instruction mnemonic
2120 *   nels  - number of elements (1, 2 or 4)
2121 *   tp    - type (float32 or float64)
2122 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2123 *   emin  - minimum unbiased exponent
2124 *   emax  - maximum unbiased exponent
2125 *   nbits - number of fraction bits
2126 */
2127#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                  \
2128void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
2129{                                                                       \
2130    ppc_vsr_t xa, xb;                                                   \
2131    int i;                                                              \
2132    int fe_flag = 0;                                                    \
2133    int fg_flag = 0;                                                    \
2134                                                                        \
2135    getVSR(xA(opcode), &xa, env);                                       \
2136    getVSR(xB(opcode), &xb, env);                                       \
2137                                                                        \
2138    for (i = 0; i < nels; i++) {                                        \
2139        if (unlikely(tp##_is_infinity(xa.fld) ||                        \
2140                     tp##_is_infinity(xb.fld) ||                        \
2141                     tp##_is_zero(xb.fld))) {                           \
2142            fe_flag = 1;                                                \
2143            fg_flag = 1;                                                \
2144        } else {                                                        \
2145            int e_a = ppc_##tp##_get_unbiased_exp(xa.fld);              \
2146            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);              \
2147                                                                        \
2148            if (unlikely(tp##_is_any_nan(xa.fld) ||                     \
2149                         tp##_is_any_nan(xb.fld))) {                    \
2150                fe_flag = 1;                                            \
2151            } else if ((e_b <= emin) || (e_b >= (emax-2))) {            \
2152                fe_flag = 1;                                            \
2153            } else if (!tp##_is_zero(xa.fld) &&                         \
2154                       (((e_a - e_b) >= emax) ||                        \
2155                        ((e_a - e_b) <= (emin+1)) ||                    \
2156                         (e_a <= (emin+nbits)))) {                      \
2157                fe_flag = 1;                                            \
2158            }                                                           \
2159                                                                        \
2160            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {           \
2161                /* XB is not zero because of the above check and */     \
2162                /* so must be denormalized.                      */     \
2163                fg_flag = 1;                                            \
2164            }                                                           \
2165        }                                                               \
2166    }                                                                   \
2167                                                                        \
2168    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
2169}
2170
2171VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
2172VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
2173VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
2174
2175/* VSX_TSQRT - VSX floating point test for square root
2176 *   op    - instruction mnemonic
2177 *   nels  - number of elements (1, 2 or 4)
2178 *   tp    - type (float32 or float64)
2179 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2180 *   emin  - minimum unbiased exponent
2182 *   nbits - number of fraction bits
2183 */
2184#define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                       \
2185void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
2186{                                                                       \
2187    ppc_vsr_t xa, xb;                                                   \
2188    int i;                                                              \
2189    int fe_flag = 0;                                                    \
2190    int fg_flag = 0;                                                    \
2191                                                                        \
2192    getVSR(xA(opcode), &xa, env);                                       \
2193    getVSR(xB(opcode), &xb, env);                                       \
2194                                                                        \
2195    for (i = 0; i < nels; i++) {                                        \
2196        if (unlikely(tp##_is_infinity(xb.fld) ||                        \
2197                     tp##_is_zero(xb.fld))) {                           \
2198            fe_flag = 1;                                                \
2199            fg_flag = 1;                                                \
2200        } else {                                                        \
2201            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);              \
2202                                                                        \
2203            if (unlikely(tp##_is_any_nan(xb.fld))) {                    \
2204                fe_flag = 1;                                            \
2205            } else if (unlikely(tp##_is_zero(xb.fld))) {                \
2206                fe_flag = 1;                                            \
2207            } else if (unlikely(tp##_is_neg(xb.fld))) {                 \
2208                fe_flag = 1;                                            \
2209            } else if (!tp##_is_zero(xb.fld) &&                         \
2210                      (e_b <= (emin+nbits))) {                          \
2211                fe_flag = 1;                                            \
2212            }                                                           \
2213                                                                        \
2214            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {           \
2215                /* XB is not zero because of the above check and */     \
2216                /* therefore must be denormalized.               */     \
2217                fg_flag = 1;                                            \
2218            }                                                           \
2219        }                                                               \
2220    }                                                                   \
2221                                                                        \
2222    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
2223}
2224
2225VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
2226VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
2227VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
2228
2229/* VSX_MADD - VSX floating point multiply/add variations
2230 *   op    - instruction mnemonic
2231 *   nels  - number of elements (1, 2 or 4)
2232 *   tp    - type (float32 or float64)
2233 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2234 *   maddflgs - flags for the float*muladd routine that control the
2235 *           various forms (madd, msub, nmadd, nmsub)
2236 *   afrm  - A form (1=A, 0=M)
2237 *   sfprf - set FPRF
 *   r2sp  - round the intermediate result to single precision
2238 */
2239#define VSX_MADD(op, nels, tp, fld, maddflgs, afrm, sfprf, r2sp)              \
2240void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
2241{                                                                             \
2242    ppc_vsr_t xt_in, xa, xb, xt_out;                                          \
2243    ppc_vsr_t *b, *c;                                                         \
2244    int i;                                                                    \
2245                                                                              \
2246    if (afrm) { /* AxB + T */                                                 \
2247        b = &xb;                                                              \
2248        c = &xt_in;                                                           \
2249    } else { /* AxT + B */                                                    \
2250        b = &xt_in;                                                           \
2251        c = &xb;                                                              \
2252    }                                                                         \
2253                                                                              \
2254    getVSR(xA(opcode), &xa, env);                                             \
2255    getVSR(xB(opcode), &xb, env);                                             \
2256    getVSR(xT(opcode), &xt_in, env);                                          \
2257                                                                              \
2258    xt_out = xt_in;                                                           \
2259                                                                              \
2260    helper_reset_fpstatus(env);                                               \
2261                                                                              \
2262    for (i = 0; i < nels; i++) {                                              \
2263        float_status tstat = env->fp_status;                                  \
2264        set_float_exception_flags(0, &tstat);                                 \
2265        if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
2266            /* Avoid double rounding errors by rounding the intermediate */   \
2267            /* result to odd.                                            */   \
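            /* Round to odd is implemented here by truncating and then     */   \
            /* or-ing the inexact (sticky) flag into the lsb, so the later */   \
            /* frsp rounding to single precision cannot double-round.      */   \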
2268            set_float_rounding_mode(float_round_to_zero, &tstat);             \
2269            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                  \
2270                                       maddflgs, &tstat);                     \
2271            xt_out.fld |= (get_float_exception_flags(&tstat) &                \
2272                              float_flag_inexact) != 0;                       \
2273        } else {                                                              \
2274            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                  \
2275                                        maddflgs, &tstat);                    \
2276        }                                                                     \
2277        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
2278                                                                              \
2279        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
2280            tp##_maddsub_update_excp(env, xa.fld, b->fld, c->fld, maddflgs);  \
2281        }                                                                     \
2282                                                                              \
2283        if (r2sp) {                                                           \
2284            xt_out.fld = helper_frsp(env, xt_out.fld);                        \
2285        }                                                                     \
2286                                                                              \
2287        if (sfprf) {                                                          \
2288            helper_compute_fprf_float64(env, xt_out.fld);                     \
2289        }                                                                     \
2290    }                                                                         \
2291    putVSR(xT(opcode), &xt_out, env);                                         \
2292    float_check_status(env);                                                  \
2293}
2294
2295VSX_MADD(xsmaddadp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 0)
2296VSX_MADD(xsmaddmdp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 0)
2297VSX_MADD(xsmsubadp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 0)
2298VSX_MADD(xsmsubmdp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 0)
2299VSX_MADD(xsnmaddadp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 0)
2300VSX_MADD(xsnmaddmdp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 0)
2301VSX_MADD(xsnmsubadp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 0)
2302VSX_MADD(xsnmsubmdp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 0)
2303
2304VSX_MADD(xsmaddasp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 1)
2305VSX_MADD(xsmaddmsp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 1)
2306VSX_MADD(xsmsubasp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 1)
2307VSX_MADD(xsmsubmsp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 1)
2308VSX_MADD(xsnmaddasp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 1)
2309VSX_MADD(xsnmaddmsp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 1)
2310VSX_MADD(xsnmsubasp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 1)
2311VSX_MADD(xsnmsubmsp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 1)
2312
2313VSX_MADD(xvmaddadp, 2, float64, VsrD(i), MADD_FLGS, 1, 0, 0)
2314VSX_MADD(xvmaddmdp, 2, float64, VsrD(i), MADD_FLGS, 0, 0, 0)
2315VSX_MADD(xvmsubadp, 2, float64, VsrD(i), MSUB_FLGS, 1, 0, 0)
2316VSX_MADD(xvmsubmdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0, 0)
2317VSX_MADD(xvnmaddadp, 2, float64, VsrD(i), NMADD_FLGS, 1, 0, 0)
2318VSX_MADD(xvnmaddmdp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0, 0)
2319VSX_MADD(xvnmsubadp, 2, float64, VsrD(i), NMSUB_FLGS, 1, 0, 0)
2320VSX_MADD(xvnmsubmdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0, 0)
2321
2322VSX_MADD(xvmaddasp, 4, float32, VsrW(i), MADD_FLGS, 1, 0, 0)
2323VSX_MADD(xvmaddmsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0, 0)
2324VSX_MADD(xvmsubasp, 4, float32, VsrW(i), MSUB_FLGS, 1, 0, 0)
2325VSX_MADD(xvmsubmsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0, 0)
2326VSX_MADD(xvnmaddasp, 4, float32, VsrW(i), NMADD_FLGS, 1, 0, 0)
2327VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
2328VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
2329VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
2330
2331/* VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
2332 *   op    - instruction mnemonic
2333 *   cmp   - comparison operation
2334 *   exp   - expected result of comparison
2335 *   svxvc - set VXVC bit
2336 */
2337#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc)                                \
2338void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
2339{                                                                             \
2340    ppc_vsr_t xt, xa, xb;                                                     \
2341    bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false;            \
2342                                                                              \
2343    getVSR(xA(opcode), &xa, env);                                             \
2344    getVSR(xB(opcode), &xb, env);                                             \
2345    getVSR(xT(opcode), &xt, env);                                             \
2346                                                                              \
2347    if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||              \
2348        float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {              \
2349        vxsnan_flag = true;                                                   \
2350        if (fpscr_ve == 0 && svxvc) {                                         \
2351            vxvc_flag = true;                                                 \
2352        }                                                                     \
2353    } else if (svxvc) {                                                       \
2354        vxvc_flag = float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) ||      \
2355            float64_is_quiet_nan(xb.VsrD(0), &env->fp_status);                \
2356    }                                                                         \
2357    if (vxsnan_flag) {                                                        \
2358        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);                \
2359    }                                                                         \
2360    if (vxvc_flag) {                                                          \
2361        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);                  \
2362    }                                                                         \
2363    vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag);                        \
2364                                                                              \
2365    if (!vex_flag) {                                                          \
2366        if (float64_##cmp(xb.VsrD(0), xa.VsrD(0), &env->fp_status) == exp) {  \
2367            xt.VsrD(0) = -1;                                                  \
2368            xt.VsrD(1) = 0;                                                   \
2369        } else {                                                              \
2370            xt.VsrD(0) = 0;                                                   \
2371            xt.VsrD(1) = 0;                                                   \
2372        }                                                                     \
2373    }                                                                         \
2374    putVSR(xT(opcode), &xt, env);                                             \
2375    helper_float_check_status(env);                                           \
2376}
2377
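/*
 * Note that the macro compares xb against xa, so greater/equal results
 * come from the reversed le/lt primitives, and xscmpnedp checks eq == 0.
 */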
2378VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
2379VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
2380VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
2381VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)
2382
2383void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode)
2384{
2385    ppc_vsr_t xa, xb;
2386    int64_t exp_a, exp_b;
2387    uint32_t cc;
2388
2389    getVSR(xA(opcode), &xa, env);
2390    getVSR(xB(opcode), &xb, env);
2391
2392    exp_a = extract64(xa.VsrD(0), 52, 11);
2393    exp_b = extract64(xb.VsrD(0), 52, 11);
2394
2395    if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||
2396                 float64_is_any_nan(xb.VsrD(0)))) {
2397        cc = CRF_SO;
2398    } else {
2399        if (exp_a < exp_b) {
2400            cc = CRF_LT;
2401        } else if (exp_a > exp_b) {
2402            cc = CRF_GT;
2403        } else {
2404            cc = CRF_EQ;
2405        }
2406    }
2407
2408    env->fpscr &= ~(0x0F << FPSCR_FPRF);
2409    env->fpscr |= cc << FPSCR_FPRF;
2410    env->crf[BF(opcode)] = cc;
2411
2412    helper_float_check_status(env);
2413}
2414
2415void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode)
2416{
2417    ppc_vsr_t xa, xb;
2418    int64_t exp_a, exp_b;
2419    uint32_t cc;
2420
2421    getVSR(rA(opcode) + 32, &xa, env);
2422    getVSR(rB(opcode) + 32, &xb, env);
2423
2424    exp_a = extract64(xa.VsrD(0), 48, 15);
2425    exp_b = extract64(xb.VsrD(0), 48, 15);
2426
2427    if (unlikely(float128_is_any_nan(xa.f128) ||
2428                 float128_is_any_nan(xb.f128))) {
2429        cc = CRF_SO;
2430    } else {
2431        if (exp_a < exp_b) {
2432            cc = CRF_LT;
2433        } else if (exp_a > exp_b) {
2434            cc = CRF_GT;
2435        } else {
2436            cc = CRF_EQ;
2437        }
2438    }
2439
2440    env->fpscr &= ~(0x0F << FPSCR_FPRF);
2441    env->fpscr |= cc << FPSCR_FPRF;
2442    env->crf[BF(opcode)] = cc;
2443
2444    helper_float_check_status(env);
2445}
2446
2447#define VSX_SCALAR_CMP(op, ordered)                                      \
2448void helper_##op(CPUPPCState *env, uint32_t opcode)                      \
2449{                                                                        \
2450    ppc_vsr_t xa, xb;                                                    \
2451    uint32_t cc = 0;                                                     \
2452    bool vxsnan_flag = false, vxvc_flag = false;                         \
2453                                                                         \
2454    helper_reset_fpstatus(env);                                          \
2455    getVSR(xA(opcode), &xa, env);                                        \
2456    getVSR(xB(opcode), &xb, env);                                        \
2457                                                                         \
2458    if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||         \
2459        float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {         \
2460        vxsnan_flag = true;                                              \
2461        cc = CRF_SO;                                                     \
2462        if (fpscr_ve == 0 && ordered) {                                  \
2463            vxvc_flag = true;                                            \
2464        }                                                                \
2465    } else if (float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) ||      \
2466               float64_is_quiet_nan(xb.VsrD(0), &env->fp_status)) {      \
2467        cc = CRF_SO;                                                     \
2468        if (ordered) {                                                   \
2469            vxvc_flag = true;                                            \
2470        }                                                                \
2471    }                                                                    \
2472    if (vxsnan_flag) {                                                   \
2473        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
2474    }                                                                    \
2475    if (vxvc_flag) {                                                     \
2476        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);             \
2477    }                                                                    \
2478                                                                         \
2479    if (float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {           \
2480        cc |= CRF_LT;                                                    \
2481    } else if (!float64_le(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {   \
2482        cc |= CRF_GT;                                                    \
2483    } else {                                                             \
2484        cc |= CRF_EQ;                                                    \
2485    }                                                                    \
2486                                                                         \
2487    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                 \
2488    env->fpscr |= cc << FPSCR_FPRF;                                      \
2489    env->crf[BF(opcode)] = cc;                                           \
2490                                                                         \
2491    float_check_status(env);                                             \
2492}
2493
2494VSX_SCALAR_CMP(xscmpodp, 1)
2495VSX_SCALAR_CMP(xscmpudp, 0)
2496
2497#define VSX_SCALAR_CMPQ(op, ordered)                                    \
2498void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
2499{                                                                       \
2500    ppc_vsr_t xa, xb;                                                   \
2501    uint32_t cc = 0;                                                    \
2502    bool vxsnan_flag = false, vxvc_flag = false;                        \
2503                                                                        \
2504    helper_reset_fpstatus(env);                                         \
2505    getVSR(rA(opcode) + 32, &xa, env);                                  \
2506    getVSR(rB(opcode) + 32, &xb, env);                                  \
2507                                                                        \
2508    if (float128_is_signaling_nan(xa.f128, &env->fp_status) ||          \
2509        float128_is_signaling_nan(xb.f128, &env->fp_status)) {          \
2510        vxsnan_flag = true;                                             \
2511        cc = CRF_SO;                                                    \
2512        if (fpscr_ve == 0 && ordered) {                                 \
2513            vxvc_flag = true;                                           \
2514        }                                                               \
2515    } else if (float128_is_quiet_nan(xa.f128, &env->fp_status) ||       \
2516               float128_is_quiet_nan(xb.f128, &env->fp_status)) {       \
2517        cc = CRF_SO;                                                    \
2518        if (ordered) {                                                  \
2519            vxvc_flag = true;                                           \
2520        }                                                               \
2521    }                                                                   \
2522    if (vxsnan_flag) {                                                  \
2523        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);          \
2524    }                                                                   \
2525    if (vxvc_flag) {                                                    \
2526        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);            \
2527    }                                                                   \
2528                                                                        \
2529    if (float128_lt(xa.f128, xb.f128, &env->fp_status)) {               \
2530        cc |= CRF_LT;                                                   \
2531    } else if (!float128_le(xa.f128, xb.f128, &env->fp_status)) {       \
2532        cc |= CRF_GT;                                                   \
2533    } else {                                                            \
2534        cc |= CRF_EQ;                                                   \
2535    }                                                                   \
2536                                                                        \
2537    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                \
2538    env->fpscr |= cc << FPSCR_FPRF;                                     \
2539    env->crf[BF(opcode)] = cc;                                          \
2540                                                                        \
2541    float_check_status(env);                                            \
2542}
2543
2544VSX_SCALAR_CMPQ(xscmpoqp, 1)
2545VSX_SCALAR_CMPQ(xscmpuqp, 0)
2546
2547/* VSX_MAX_MIN - VSX floating point maximum/minimum
2548 *   name  - instruction mnemonic
2549 *   op    - operation (max or min)
2550 *   nels  - number of elements (1, 2 or 4)
2551 *   tp    - type (float32 or float64)
2552 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2553 */
2554#define VSX_MAX_MIN(name, op, nels, tp, fld)                                  \
2555void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
2556{                                                                             \
2557    ppc_vsr_t xt, xa, xb;                                                     \
2558    int i;                                                                    \
2559                                                                              \
2560    getVSR(xA(opcode), &xa, env);                                             \
2561    getVSR(xB(opcode), &xb, env);                                             \
2562    getVSR(xT(opcode), &xt, env);                                             \
2563                                                                              \
2564    for (i = 0; i < nels; i++) {                                              \
2565        xt.fld = tp##_##op(xa.fld, xb.fld, &env->fp_status);                  \
2566        if (unlikely(tp##_is_signaling_nan(xa.fld, &env->fp_status) ||        \
2567                     tp##_is_signaling_nan(xb.fld, &env->fp_status))) {       \
2568            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);            \
2569        }                                                                     \
2570    }                                                                         \
2571                                                                              \
2572    putVSR(xT(opcode), &xt, env);                                             \
2573    float_check_status(env);                                                  \
2574}
2575
2576VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
2577VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
2578VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
2579VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
2580VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
2581VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
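
/*
 * Illustration only (not compiled): with the arguments used for xsmaxdp
 * above, the loop in VSX_MAX_MIN reduces to a single
 *
 *     xt.VsrD(0) = float64_maxnum(xa.VsrD(0), xb.VsrD(0), &env->fp_status);
 *
 * followed by the SNaN check.
 */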
2582
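/* VSX_MAX_MINC - VSX scalar floating point maximum/minimum (xsmaxcdp and
 *                xsmincdp)
 *   name  - instruction mnemonic
 *   max   - non-zero for maximum, zero for minimum
 * If either operand is a NaN, the second source operand is returned; a
 * signaling NaN operand additionally raises VXSNAN.
 */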
2583#define VSX_MAX_MINC(name, max)                                               \
2584void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
2585{                                                                             \
2586    ppc_vsr_t xt, xa, xb;                                                     \
2587    bool vxsnan_flag = false, vex_flag = false;                               \
2588                                                                              \
2589    getVSR(rA(opcode) + 32, &xa, env);                                        \
2590    getVSR(rB(opcode) + 32, &xb, env);                                        \
2591    getVSR(rD(opcode) + 32, &xt, env);                                        \
2592                                                                              \
2593    if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||                            \
2594                 float64_is_any_nan(xb.VsrD(0)))) {                           \
2595        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||          \
2596            float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {          \
2597            vxsnan_flag = true;                                               \
2598        }                                                                     \
2599        xt.VsrD(0) = xb.VsrD(0);                                              \
2600    } else if ((max &&                                                        \
2601               !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) ||       \
2602               (!max &&                                                       \
2603               float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) {        \
2604        xt.VsrD(0) = xa.VsrD(0);                                              \
2605    } else {                                                                  \
2606        xt.VsrD(0) = xb.VsrD(0);                                              \
2607    }                                                                         \
2608                                                                              \
2609    vex_flag = fpscr_ve & vxsnan_flag;                                        \
2610    if (vxsnan_flag) {                                                        \
2611        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);                \
2612    }                                                                         \
2613    if (!vex_flag) {                                                          \
2614        putVSR(rD(opcode) + 32, &xt, env);                                    \
2615    }                                                                         \
2616}
2617
2618VSX_MAX_MINC(xsmaxcdp, 1)
2619VSX_MAX_MINC(xsmincdp, 0)
2620
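/* VSX_MAX_MINJ - VSX scalar floating point maximum/minimum (xsmaxjdp and
 *                xsminjdp)
 *   name  - instruction mnemonic
 *   max   - non-zero for maximum, zero for minimum
 * A NaN operand is propagated (xA checked first), -0.0 is ordered below
 * +0.0, and a signaling NaN operand raises VXSNAN.
 */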
2621#define VSX_MAX_MINJ(name, max)                                               \
2622void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
2623{                                                                             \
2624    ppc_vsr_t xt, xa, xb;                                                     \
2625    bool vxsnan_flag = false, vex_flag = false;                               \
2626                                                                              \
2627    getVSR(rA(opcode) + 32, &xa, env);                                        \
2628    getVSR(rB(opcode) + 32, &xb, env);                                        \
2629    getVSR(rD(opcode) + 32, &xt, env);                                        \
2630                                                                              \
2631    if (unlikely(float64_is_any_nan(xa.VsrD(0)))) {                           \
2632        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status)) {          \
2633            vxsnan_flag = true;                                               \
2634        }                                                                     \
2635        xt.VsrD(0) = xa.VsrD(0);                                              \
2636    } else if (unlikely(float64_is_any_nan(xb.VsrD(0)))) {                    \
2637        if (float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {          \
2638            vxsnan_flag = true;                                               \
2639        }                                                                     \
2640        xt.VsrD(0) = xb.VsrD(0);                                              \
2641    } else if (float64_is_zero(xa.VsrD(0)) && float64_is_zero(xb.VsrD(0))) {  \
2642        if (max) {                                                            \
2643            if (!float64_is_neg(xa.VsrD(0)) || !float64_is_neg(xb.VsrD(0))) { \
2644                xt.VsrD(0) = 0ULL;                                            \
2645            } else {                                                          \
2646                xt.VsrD(0) = 0x8000000000000000ULL;                           \
2647            }                                                                 \
2648        } else {                                                              \
2649            if (float64_is_neg(xa.VsrD(0)) || float64_is_neg(xb.VsrD(0))) {   \
2650                xt.VsrD(0) = 0x8000000000000000ULL;                           \
2651            } else {                                                          \
2652                xt.VsrD(0) = 0ULL;                                            \
2653            }                                                                 \
2654        }                                                                     \
2655    } else if ((max &&                                                        \
2656               !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) ||       \
2657               (!max &&                                                       \
2658               float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) {        \
2659        xt.VsrD(0) = xa.VsrD(0);                                              \
2660    } else {                                                                  \
2661        xt.VsrD(0) = xb.VsrD(0);                                              \
2662    }                                                                         \
2663                                                                              \
2664    vex_flag = fpscr_ve & vxsnan_flag;                                        \
2665    if (vxsnan_flag) {                                                        \
2666        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);                \
2667    }                                                                         \
2668    if (!vex_flag) {                                                          \
2669        putVSR(rD(opcode) + 32, &xt, env);                                    \
2670    }                                                                         \
2671}
2672
2673VSX_MAX_MINJ(xsmaxjdp, 1)
2674VSX_MAX_MINJ(xsminjdp, 0)
2675
2676/* VSX_CMP - VSX floating point compare
2677 *   op    - instruction mnemonic
2678 *   nels  - number of elements (1, 2 or 4)
2679 *   tp    - type (float32 or float64)
2680 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2681 *   cmp   - comparison operation
2682 *   svxvc - set VXVC bit
2683 *   exp   - expected result of comparison
2684 */
2685#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp)                       \
2686void helper_##op(CPUPPCState *env, uint32_t opcode)                       \
2687{                                                                         \
2688    ppc_vsr_t xt, xa, xb;                                                 \
2689    int i;                                                                \
2690    int all_true = 1;                                                     \
2691    int all_false = 1;                                                    \
2692                                                                          \
2693    getVSR(xA(opcode), &xa, env);                                         \
2694    getVSR(xB(opcode), &xb, env);                                         \
2695    getVSR(xT(opcode), &xt, env);                                         \
2696                                                                          \
2697    for (i = 0; i < nels; i++) {                                          \
2698        if (unlikely(tp##_is_any_nan(xa.fld) ||                           \
2699                     tp##_is_any_nan(xb.fld))) {                          \
2700            if (tp##_is_signaling_nan(xa.fld, &env->fp_status) ||         \
2701                tp##_is_signaling_nan(xb.fld, &env->fp_status)) {         \
2702                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);    \
2703            }                                                             \
2704            if (svxvc) {                                                  \
2705                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);      \
2706            }                                                             \
2707            xt.fld = 0;                                                   \
2708            all_true = 0;                                                 \
2709        } else {                                                          \
2710            if (tp##_##cmp(xb.fld, xa.fld, &env->fp_status) == exp) {     \
2711                xt.fld = -1;                                              \
2712                all_false = 0;                                            \
2713            } else {                                                      \
2714                xt.fld = 0;                                               \
2715                all_true = 0;                                             \
2716            }                                                             \
2717        }                                                                 \
2718    }                                                                     \
2719                                                                          \
2720    putVSR(xT(opcode), &xt, env);                                         \
2721    if ((opcode >> (31-21)) & 1) {                                        \
2722        env->crf[6] = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);       \
2723    }                                                                     \
2724    float_check_status(env);                                              \
2725}
2726
2727VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
2728VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
2729VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
2730VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
2731VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
2732VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
2733VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
2734VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
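
/*
 * Note that the element test in VSX_CMP is written as cmp(xB, xA) == exp,
 * so e.g. xvcmpgedp ("xA >= xB") is evaluated as float64_le(xB, xA) == 1,
 * and the xvcmpne* forms as eq(xB, xA) == 0.
 */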
2735
2736/* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
2737 *   op    - instruction mnemonic
2738 *   nels  - number of elements (1, 2 or 4)
2739 *   stp   - source type (float32 or float64)
2740 *   ttp   - target type (float32 or float64)
2741 *   sfld  - source vsr_t field
2742 *   tfld  - target vsr_t field (VsrD(*) or VsrW(*))
2743 *   sfprf - set FPRF
2744 */
2745#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf)    \
2746void helper_##op(CPUPPCState *env, uint32_t opcode)                \
2747{                                                                  \
2748    ppc_vsr_t xt, xb;                                              \
2749    int i;                                                         \
2750                                                                   \
2751    getVSR(xB(opcode), &xb, env);                                  \
2752    getVSR(xT(opcode), &xt, env);                                  \
2753                                                                   \
2754    for (i = 0; i < nels; i++) {                                   \
2755        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);        \
2756        if (unlikely(stp##_is_signaling_nan(xb.sfld,               \
2757                                            &env->fp_status))) {   \
2758            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
2759            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                 \
2760        }                                                          \
2761        if (sfprf) {                                               \
2762            helper_compute_fprf_##ttp(env, xt.tfld);               \
2763        }                                                          \
2764    }                                                              \
2765                                                                   \
2766    putVSR(xT(opcode), &xt, env);                                  \
2767    float_check_status(env);                                       \
2768}
2769
2770VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
2771VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
2772VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2*i), 0)
2773VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2*i), VsrD(i), 0)
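
/*
 * Illustration only (not compiled): for xscvdpsp the loop above reduces to
 *
 *     xt.VsrW(0) = float64_to_float32(xb.VsrD(0), &env->fp_status);
 *
 * while the vector forms keep each single-precision element in the even
 * word (VsrW(2*i)) of its doubleword.
 */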
2774
2775/* VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
2776 *   op    - instruction mnemonic
2777 *   nels  - number of elements (1, 2 or 4)
2778 *   stp   - source type (float32 or float64)
2779 *   ttp   - target type (e.g. float128)
2780 *   sfld  - source vsr_t field
2781 *   tfld  - target vsr_t field
2782 *   sfprf - set FPRF
2783 */
2784#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf)    \
2785void helper_##op(CPUPPCState *env, uint32_t opcode)                       \
2786{                                                                       \
2787    ppc_vsr_t xt, xb;                                                   \
2788    int i;                                                              \
2789                                                                        \
2790    getVSR(rB(opcode) + 32, &xb, env);                                  \
2791    getVSR(rD(opcode) + 32, &xt, env);                                  \
2792                                                                        \
2793    for (i = 0; i < nels; i++) {                                        \
2794        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);             \
2795        if (unlikely(stp##_is_signaling_nan(xb.sfld,                    \
2796                                            &env->fp_status))) {        \
2797            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);      \
2798            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                      \
2799        }                                                               \
2800        if (sfprf) {                                                    \
2801            helper_compute_fprf_##ttp(env, xt.tfld);                    \
2802        }                                                               \
2803    }                                                                   \
2804                                                                        \
2805    putVSR(rD(opcode) + 32, &xt, env);                                  \
2806    float_check_status(env);                                            \
2807}
2808
2809VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
2810
2811/* VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
2812 *                       involving one half precision value
2813 *   op    - instruction mnemonic
2814 *   nels  - number of elements (1, 2 or 4)
2815 *   stp   - source type
2816 *   ttp   - target type
2817 *   sfld  - source vsr_t field
2818 *   tfld  - target vsr_t field
2819 *   sfprf - set FPRF
2820 */
2821#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf) \
2822void helper_##op(CPUPPCState *env, uint32_t opcode)                \
2823{                                                                  \
2824    ppc_vsr_t xt, xb;                                              \
2825    int i;                                                         \
2826                                                                   \
2827    getVSR(xB(opcode), &xb, env);                                  \
2828    memset(&xt, 0, sizeof(xt));                                    \
2829                                                                   \
2830    for (i = 0; i < nels; i++) {                                   \
2831        xt.tfld = stp##_to_##ttp(xb.sfld, 1, &env->fp_status);     \
2832        if (unlikely(stp##_is_signaling_nan(xb.sfld,               \
2833                                            &env->fp_status))) {   \
2834            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
2835            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                 \
2836        }                                                          \
2837        if (sfprf) {                                               \
2838            helper_compute_fprf_##ttp(env, xt.tfld);               \
2839        }                                                          \
2840    }                                                              \
2841                                                                   \
2842    putVSR(xT(opcode), &xt, env);                                  \
2843    float_check_status(env);                                       \
2844}
2845
2846VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
2847VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
2848VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0)
2849VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
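
/*
 * The extra '1' argument passed to the conversion routines above is the
 * softfloat "ieee" flag; here it selects standard IEEE 754 half-precision
 * handling (with infinities and NaNs).
 */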
2850
2851/*
2852 * xscvqpdp isn't using VSX_CVT_FP_TO_FP() because it also handles the
2853 * round-to-odd form xscvqpdpo, selected here via Rc(opcode).
2854 */
2855void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode)
2856{
2857    ppc_vsr_t xt, xb;
2858    float_status tstat;
2859
2860    getVSR(rB(opcode) + 32, &xb, env);
2861    memset(&xt, 0, sizeof(xt));
2862
2863    tstat = env->fp_status;
2864    if (unlikely(Rc(opcode) != 0)) {
2865        tstat.float_rounding_mode = float_round_to_odd;
2866    }
2867
2868    xt.VsrD(0) = float128_to_float64(xb.f128, &tstat);
2869    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
2870    if (unlikely(float128_is_signaling_nan(xb.f128,
2871                                           &tstat))) {
2872        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
2873        xt.VsrD(0) = float64_snan_to_qnan(xt.VsrD(0));
2874    }
2875    helper_compute_fprf_float64(env, xt.VsrD(0));
2876
2877    putVSR(rD(opcode) + 32, &xt, env);
2878    float_check_status(env);
2879}
2880
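/* xscvdpspn and xscvspdpn - non-signalling DP <-> SP conversions; a local
 * copy of the FP status is used, so no exception flags accumulate in env
 * and FPRF is left untouched.
 */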
2881uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
2882{
2883    float_status tstat = env->fp_status;
2884    set_float_exception_flags(0, &tstat);
2885
2886    return (uint64_t)float64_to_float32(xb, &tstat) << 32;
2887}
2888
2889uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
2890{
2891    float_status tstat = env->fp_status;
2892    set_float_exception_flags(0, &tstat);
2893
2894    return float32_to_float64(xb >> 32, &tstat);
2895}
2896
2897/* VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
2898 *   op    - instruction mnemonic
2899 *   nels  - number of elements (1, 2 or 4)
2900 *   stp   - source type (float32 or float64)
2901 *   ttp   - target type (int32, uint32, int64 or uint64)
2902 *   sfld  - source vsr_t field
2903 *   tfld  - target vsr_t field
2904 *   rnan  - resulting NaN
2905 */
2906#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan)              \
2907void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
2908{                                                                            \
2909    ppc_vsr_t xt, xb;                                                        \
2910    int i;                                                                   \
2911                                                                             \
2912    getVSR(xB(opcode), &xb, env);                                            \
2913    getVSR(xT(opcode), &xt, env);                                            \
2914                                                                             \
2915    for (i = 0; i < nels; i++) {                                             \
2916        if (unlikely(stp##_is_any_nan(xb.sfld))) {                           \
2917            if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) {          \
2918                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);       \
2919            }                                                                \
2920            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);            \
2921            xt.tfld = rnan;                                                  \
2922        } else {                                                             \
2923            xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld,                \
2924                          &env->fp_status);                                  \
2925            if (env->fp_status.float_exception_flags & float_flag_invalid) { \
2926                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);        \
2927            }                                                                \
2928        }                                                                    \
2929    }                                                                        \
2930                                                                             \
2931    putVSR(xT(opcode), &xt, env);                                            \
2932    float_check_status(env);                                                 \
2933}
2934
2935VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
2936                  0x8000000000000000ULL)
2937VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
2938                  0x80000000U)
2939VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
2940VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
2941VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
2942                  0x8000000000000000ULL)
2943VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2*i), \
2944                  0x80000000U)
2945VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
2946VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2*i), 0U)
2947VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2*i), VsrD(i), \
2948                  0x8000000000000000ULL)
2949VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
2950VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2*i), VsrD(i), 0ULL)
2951VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
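
/*
 * All of the conversions above truncate (round toward zero).  A NaN source
 * produces rnan in the target element and raises VXCVI (preceded by VXSNAN
 * for a signaling NaN); out-of-range values also raise VXCVI via the
 * invalid-flag check.
 */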
2952
2953/* VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
2954 *   op    - instruction mnemonic
2955 *   stp   - source type (e.g. float128)
2956 *   ttp   - target type (int32, uint32, int64 or uint64)
2957 *   sfld  - source vsr_t field
2958 *   tfld  - target vsr_t field
2959 *   rnan  - resulting NaN
2960 */
2961#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan)             \
2962void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
2963{                                                                            \
2964    ppc_vsr_t xt, xb;                                                        \
2965                                                                             \
2966    getVSR(rB(opcode) + 32, &xb, env);                                       \
2967    memset(&xt, 0, sizeof(xt));                                              \
2968                                                                             \
2969    if (unlikely(stp##_is_any_nan(xb.sfld))) {                               \
2970        if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) {              \
2971            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
2972        }                                                                    \
2973        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);                \
2974        xt.tfld = rnan;                                                      \
2975    } else {                                                                 \
2976        xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld,                    \
2977                      &env->fp_status);                                      \
2978        if (env->fp_status.float_exception_flags & float_flag_invalid) {     \
2979            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);            \
2980        }                                                                    \
2981    }                                                                        \
2982                                                                             \
2983    putVSR(rD(opcode) + 32, &xt, env);                                       \
2984    float_check_status(env);                                                 \
2985}
2986
2987VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0),          \
2988                  0x8000000000000000ULL)
2989
2990VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0),          \
2991                  0xffffffff80000000ULL)
2992VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
2993VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
2994
2995/* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
2996 *   op    - instruction mnemonic
2997 *   nels  - number of elements (1, 2 or 4)
2998 *   stp   - source type (int32, uint32, int64 or uint64)
2999 *   ttp   - target type (float32 or float64)
3000 *   sfld  - source vsr_t field
3001 *   tfld  - target vsr_t field
3002 *   sfprf - set FPRF
3003 *   r2sp  - round the result to single precision
3004 */
3005#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp)  \
3006void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
3007{                                                                       \
3008    ppc_vsr_t xt, xb;                                                   \
3009    int i;                                                              \
3010                                                                        \
3011    getVSR(xB(opcode), &xb, env);                                       \
3012    getVSR(xT(opcode), &xt, env);                                       \
3013                                                                        \
3014    for (i = 0; i < nels; i++) {                                        \
3015        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);             \
3016        if (r2sp) {                                                     \
3017            xt.tfld = helper_frsp(env, xt.tfld);                        \
3018        }                                                               \
3019        if (sfprf) {                                                    \
3020            helper_compute_fprf_float64(env, xt.tfld);                  \
3021        }                                                               \
3022    }                                                                   \
3023                                                                        \
3024    putVSR(xT(opcode), &xt, env);                                       \
3025    float_check_status(env);                                            \
3026}
3027
3028VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
3029VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
3030VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
3031VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
3032VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
3033VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
3034VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2*i), VsrD(i), 0, 0)
3035VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2*i), VsrD(i), 0, 0)
3036VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2*i), 0, 0)
3037VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2*i), 0, 0)
3038VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
3039VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
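
/*
 * For the r2sp forms (xscvsxdsp and xscvuxdsp) the converted value is
 * additionally rounded to single precision with helper_frsp(), while
 * remaining in double-precision format in the target register.
 */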
3040
3041/* VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
3042 *   op    - instruction mnemonic
3043 *   stp   - source type (int32, uint32, int64 or uint64)
3044 *   ttp   - target type (e.g. float128)
3045 *   sfld  - source vsr_t field
3046 *   tfld  - target vsr_t field
3047 */
3048#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld)              \
3049void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
3050{                                                                       \
3051    ppc_vsr_t xt, xb;                                                   \
3052                                                                        \
3053    getVSR(rB(opcode) + 32, &xb, env);                                  \
3054    getVSR(rD(opcode) + 32, &xt, env);                                  \
3055                                                                        \
3056    xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);                 \
3057    helper_compute_fprf_##ttp(env, xt.tfld);                            \
3058                                                                        \
3059    putVSR(xT(opcode) + 32, &xt, env);                                  \
3060    float_check_status(env);                                            \
3061}
3062
3063VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
3064VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
3065
3066/* For "use current rounding mode", define a value that will not be one of
3067 * the existing rounding mode enums.
3068 */
3069#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
3070  float_round_up + float_round_to_zero)
3071
3072/* VSX_ROUND - VSX floating point round
3073 *   op    - instruction mnemonic
3074 *   nels  - number of elements (1, 2 or 4)
3075 *   tp    - type (float32 or float64)
3076 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
3077 *   rmode - rounding mode
3078 *   sfprf - set FPRF
3079 */
3080#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf)                     \
3081void helper_##op(CPUPPCState *env, uint32_t opcode)                    \
3082{                                                                      \
3083    ppc_vsr_t xt, xb;                                                  \
3084    int i;                                                             \
3085    getVSR(xB(opcode), &xb, env);                                      \
3086    getVSR(xT(opcode), &xt, env);                                      \
3087                                                                       \
3088    if (rmode != FLOAT_ROUND_CURRENT) {                                \
3089        set_float_rounding_mode(rmode, &env->fp_status);               \
3090    }                                                                  \
3091                                                                       \
3092    for (i = 0; i < nels; i++) {                                       \
3093        if (unlikely(tp##_is_signaling_nan(xb.fld,                     \
3094                                           &env->fp_status))) {        \
3095            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);     \
3096            xt.fld = tp##_snan_to_qnan(xb.fld);                        \
3097        } else {                                                       \
3098            xt.fld = tp##_round_to_int(xb.fld, &env->fp_status);       \
3099        }                                                              \
3100        if (sfprf) {                                                   \
3101            helper_compute_fprf_float64(env, xt.fld);                  \
3102        }                                                              \
3103    }                                                                  \
3104                                                                       \
3105    /* If this is not a "use current rounding mode" instruction,       \
3106     * then inhibit setting of the XX bit and restore rounding         \
3107     * mode from FPSCR */                                              \
3108    if (rmode != FLOAT_ROUND_CURRENT) {                                \
3109        fpscr_set_rounding_mode(env);                                  \
3110        env->fp_status.float_exception_flags &= ~float_flag_inexact;   \
3111    }                                                                  \
3112                                                                       \
3113    putVSR(xT(opcode), &xt, env);                                      \
3114    float_check_status(env);                                           \
3115}
3116
3117VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
3118VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
3119VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
3120VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
3121VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)
3122
3123VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
3124VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
3125VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
3126VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
3127VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)
3128
3129VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
3130VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
3131VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
3132VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
3133VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
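
/*
 * The "c" forms (xsrdpic, xvrdpic, xvrspic) pass FLOAT_ROUND_CURRENT and so
 * round with the mode already selected in FPSCR[RN]; the remaining forms
 * temporarily override the rounding mode and, as noted in the macro above,
 * suppress the inexact (XX) status.
 */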
3134
3135uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
3136{
3137    helper_reset_fpstatus(env);
3138
3139    uint64_t xt = helper_frsp(env, xb);
3140
3141    helper_compute_fprf_float64(env, xt);
3142    float_check_status(env);
3143    return xt;
3144}
3145
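/* VSX_XXPERM - VSX vector byte permute
 *   op      - instruction mnemonic
 *   indexed - zero for xxperm, non-zero for xxpermr (the index is
 *             reversed, 31 - idx)
 * Each result byte i is taken from the concatenation of xA (indices 0-15)
 * and xT (indices 16-31), selected by the low 5 bits of byte i of the
 * permute control vector held in xB.
 */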
3146#define VSX_XXPERM(op, indexed)                                       \
3147void helper_##op(CPUPPCState *env, uint32_t opcode)                   \
3148{                                                                     \
3149    ppc_vsr_t xt, xa, pcv, xto;                                       \
3150    int i, idx;                                                       \
3151                                                                      \
3152    getVSR(xA(opcode), &xa, env);                                     \
3153    getVSR(xT(opcode), &xt, env);                                     \
3154    getVSR(xB(opcode), &pcv, env);                                    \
3155                                                                      \
3156    for (i = 0; i < 16; i++) {                                        \
3157        idx = pcv.VsrB(i) & 0x1F;                                     \
3158        if (indexed) {                                                \
3159            idx = 31 - idx;                                           \
3160        }                                                             \
3161        xto.VsrB(i) = (idx <= 15) ? xa.VsrB(idx) : xt.VsrB(idx - 16); \
3162    }                                                                 \
3163    putVSR(xT(opcode), &xto, env);                                    \
3164}
3165
3166VSX_XXPERM(xxperm, 0)
3167VSX_XXPERM(xxpermr, 1)
3168
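/* xvxsigsp - extract the significand of each single-precision element of
 * xB; the implicit leading bit is included for normal numbers (exponent
 * neither 0 nor 255).
 */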
3169void helper_xvxsigsp(CPUPPCState *env, uint32_t opcode)
3170{
3171    ppc_vsr_t xt, xb;
3172    uint32_t exp, i, fraction;
3173
3174    getVSR(xB(opcode), &xb, env);
3175    memset(&xt, 0, sizeof(xt));
3176
3177    for (i = 0; i < 4; i++) {
3178        exp = (xb.VsrW(i) >> 23) & 0xFF;
3179        fraction = xb.VsrW(i) & 0x7FFFFF;
3180        if (exp != 0 && exp != 255) {
3181            xt.VsrW(i) = fraction | 0x00800000;
3182        } else {
3183            xt.VsrW(i) = fraction;
3184        }
3185    }
3186    putVSR(xT(opcode), &xt, env);
3187}
3188
3189/* VSX_TEST_DC - VSX floating point test data class
3190 *   op    - instruction mnemonic
3191 *   nels  - number of elements (1, 2 or 4)
3192 *   xbn   - VSR register number
3193 *   tp    - type (float32, float64 or float128)
3194 *   fld   - vsr_t field (VsrD(*), VsrW(*) or f128)
3195 *   tfld  - target vsr_t field (VsrD(*) or VsrW(*))
3196 *   fld_max - target field max
3197 *   scrf - set result in CR and FPCC
3198 */
3199#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf)  \
3200void helper_##op(CPUPPCState *env, uint32_t opcode)         \
3201{                                                           \
3202    ppc_vsr_t xt, xb;                                       \
3203    uint32_t i, sign, dcmx;                                 \
3204    uint32_t cc, match = 0;                                 \
3205                                                            \
3206    getVSR(xbn, &xb, env);                                  \
3207    if (!scrf) {                                            \
3208        memset(&xt, 0, sizeof(xt));                         \
3209        dcmx = DCMX_XV(opcode);                             \
3210    } else {                                                \
3211        dcmx = DCMX(opcode);                                \
3212    }                                                       \
3213                                                            \
3214    for (i = 0; i < nels; i++) {                            \
3215        sign = tp##_is_neg(xb.fld);                         \
3216        if (tp##_is_any_nan(xb.fld)) {                      \
3217            match = extract32(dcmx, 6, 1);                  \
3218        } else if (tp##_is_infinity(xb.fld)) {              \
3219            match = extract32(dcmx, 4 + !sign, 1);          \
3220        } else if (tp##_is_zero(xb.fld)) {                  \
3221            match = extract32(dcmx, 2 + !sign, 1);          \
3222        } else if (tp##_is_zero_or_denormal(xb.fld)) {      \
3223            match = extract32(dcmx, 0 + !sign, 1);          \
3224        }                                                   \
3225                                                            \
3226        if (scrf) {                                         \
3227            cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT;  \
3228            env->fpscr &= ~(0x0F << FPSCR_FPRF);            \
3229            env->fpscr |= cc << FPSCR_FPRF;                 \
3230            env->crf[BF(opcode)] = cc;                      \
3231        } else {                                            \
3232            xt.tfld = match ? fld_max : 0;                  \
3233        }                                                   \
3234        match = 0;                                          \
3235    }                                                       \
3236    if (!scrf) {                                            \
3237        putVSR(xT(opcode), &xt, env);                       \
3238    }                                                       \
3239}
3240
3241VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0)
3242VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
3243VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
3244VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)
3245
3246void helper_xststdcsp(CPUPPCState *env, uint32_t opcode)
3247{
3248    ppc_vsr_t xb;
3249    uint32_t dcmx, sign, exp;
3250    uint32_t cc, match = 0, not_sp = 0;
3251
3252    getVSR(xB(opcode), &xb, env);
3253    dcmx = DCMX(opcode);
3254    exp = (xb.VsrD(0) >> 52) & 0x7FF;
3255
3256    sign = float64_is_neg(xb.VsrD(0));
3257    if (float64_is_any_nan(xb.VsrD(0))) {
3258        match = extract32(dcmx, 6, 1);
3259    } else if (float64_is_infinity(xb.VsrD(0))) {
3260        match = extract32(dcmx, 4 + !sign, 1);
3261    } else if (float64_is_zero(xb.VsrD(0))) {
3262        match = extract32(dcmx, 2 + !sign, 1);
3263    } else if (float64_is_zero_or_denormal(xb.VsrD(0)) ||
3264               (exp > 0 && exp < 0x381)) {
3265        match = extract32(dcmx, 0 + !sign, 1);
3266    }
3267
3268    not_sp = !float64_eq(xb.VsrD(0),
3269                         float32_to_float64(
3270                             float64_to_float32(xb.VsrD(0), &env->fp_status),
3271                             &env->fp_status), &env->fp_status);
3272
3273    cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
3274    env->fpscr &= ~(0x0F << FPSCR_FPRF);
3275    env->fpscr |= cc << FPSCR_FPRF;
3276    env->crf[BF(opcode)] = cc;
3277}
3278
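/* xsrqpi - round quad-precision to integral value.  The rounding mode is
 * derived from the R bit and the two-bit RMC field as coded below:
 * R=0/RMC=0 rounds to nearest with ties away from zero, R=0/RMC=3 uses the
 * current FPSCR mode, and R=1 selects a specific mode encoded in RMC.
 * When the EX bit (taken from Rc here) is clear, the inexact flag is
 * discarded.
 */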
3279void helper_xsrqpi(CPUPPCState *env, uint32_t opcode)
3280{
3281    ppc_vsr_t xb;
3282    ppc_vsr_t xt;
3283    uint8_t r = Rrm(opcode);
3284    uint8_t ex = Rc(opcode);
3285    uint8_t rmc = RMC(opcode);
3286    uint8_t rmode = 0;
3287    float_status tstat;
3288
3289    getVSR(rB(opcode) + 32, &xb, env);
3290    memset(&xt, 0, sizeof(xt));
3291    helper_reset_fpstatus(env);
3292
3293    if (r == 0 && rmc == 0) {
3294        rmode = float_round_ties_away;
3295    } else if (r == 0 && rmc == 0x3) {
3296        rmode = fpscr_rn;
3297    } else if (r == 1) {
3298        switch (rmc) {
3299        case 0:
3300            rmode = float_round_nearest_even;
3301            break;
3302        case 1:
3303            rmode = float_round_to_zero;
3304            break;
3305        case 2:
3306            rmode = float_round_up;
3307            break;
3308        case 3:
3309            rmode = float_round_down;
3310            break;
3311        default:
3312            abort();
3313        }
3314    }
3315
3316    tstat = env->fp_status;
3317    set_float_exception_flags(0, &tstat);
3318    set_float_rounding_mode(rmode, &tstat);
3319    xt.f128 = float128_round_to_int(xb.f128, &tstat);
3320    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3321
3322    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3323        if (float128_is_signaling_nan(xb.f128, &tstat)) {
3324            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
3325            xt.f128 = float128_snan_to_qnan(xt.f128);
3326        }
3327    }
3328
3329    if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
3330        env->fp_status.float_exception_flags &= ~float_flag_inexact;
3331    }
3332
3333    helper_compute_fprf_float128(env, xt.f128);
3334    float_check_status(env);
3335    putVSR(rD(opcode) + 32, &xt, env);
3336}
3337
3338void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode)
3339{
3340    ppc_vsr_t xb;
3341    ppc_vsr_t xt;
3342    uint8_t r = Rrm(opcode);
3343    uint8_t rmc = RMC(opcode);
3344    uint8_t rmode = 0;
3345    floatx80 round_res;
3346    float_status tstat;
3347
3348    getVSR(rB(opcode) + 32, &xb, env);
3349    memset(&xt, 0, sizeof(xt));
3350    helper_reset_fpstatus(env);
3351
3352    if (r == 0 && rmc == 0) {
3353        rmode = float_round_ties_away;
3354    } else if (r == 0 && rmc == 0x3) {
3355        rmode = fpscr_rn;
3356    } else if (r == 1) {
3357        switch (rmc) {
3358        case 0:
3359            rmode = float_round_nearest_even;
3360            break;
3361        case 1:
3362            rmode = float_round_to_zero;
3363            break;
3364        case 2:
3365            rmode = float_round_up;
3366            break;
3367        case 3:
3368            rmode = float_round_down;
3369            break;
3370        default:
3371            abort();
3372        }
3373    }
3374
3375    tstat = env->fp_status;
3376    set_float_exception_flags(0, &tstat);
3377    set_float_rounding_mode(rmode, &tstat);
3378    round_res = float128_to_floatx80(xb.f128, &tstat);
3379    xt.f128 = floatx80_to_float128(round_res, &tstat);
3380    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3381
3382    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3383        if (float128_is_signaling_nan(xb.f128, &tstat)) {
3384            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
3385            xt.f128 = float128_snan_to_qnan(xt.f128);
3386        }
3387    }
3388
3389    helper_compute_fprf_float128(env, xt.f128);
3390    putVSR(rD(opcode) + 32, &xt, env);
3391    float_check_status(env);
3392}
3393
3394void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode)
3395{
3396    ppc_vsr_t xb;
3397    ppc_vsr_t xt;
3398    float_status tstat;
3399
3400    getVSR(rB(opcode) + 32, &xb, env);
3401    memset(&xt, 0, sizeof(xt));
3402    helper_reset_fpstatus(env);
3403
3404    tstat = env->fp_status;
3405    if (unlikely(Rc(opcode) != 0)) {
3406        tstat.float_rounding_mode = float_round_to_odd;
3407    }
3408
3409    set_float_exception_flags(0, &tstat);
3410    xt.f128 = float128_sqrt(xb.f128, &tstat);
3411    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3412
3413    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3414        if (float128_is_signaling_nan(xb.f128, &tstat)) {
3415            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
3416            xt.f128 = float128_snan_to_qnan(xb.f128);
3417        } else if  (float128_is_quiet_nan(xb.f128, &tstat)) {
3418            xt.f128 = xb.f128;
3419        } else if (float128_is_neg(xb.f128) && !float128_is_zero(xb.f128)) {
3420            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
3421            xt.f128 = float128_default_nan(&env->fp_status);
3422        }
3423    }
3424
3425    helper_compute_fprf_float128(env, xt.f128);
3426    putVSR(rD(opcode) + 32, &xt, env);
3427    float_check_status(env);
3428}
3429
3430void helper_xssubqp(CPUPPCState *env, uint32_t opcode)
3431{
3432    ppc_vsr_t xt, xa, xb;
3433    float_status tstat;
3434
3435    getVSR(rA(opcode) + 32, &xa, env);
3436    getVSR(rB(opcode) + 32, &xb, env);
3437    getVSR(rD(opcode) + 32, &xt, env);
3438    helper_reset_fpstatus(env);
3439
3440    tstat = env->fp_status;
3441    if (unlikely(Rc(opcode) != 0)) {
3442        tstat.float_rounding_mode = float_round_to_odd;
3443    }
3444
3445    set_float_exception_flags(0, &tstat);
3446    xt.f128 = float128_sub(xa.f128, xb.f128, &tstat);
3447    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3448
3449    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3450        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
3451            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
3452        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
3453                   float128_is_signaling_nan(xb.f128, &tstat)) {
3454            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
3455        }
3456    }
3457
3458    helper_compute_fprf_float128(env, xt.f128);
3459    putVSR(rD(opcode) + 32, &xt, env);
3460    float_check_status(env);
3461}
3462