qemu/target/ppc/mem_helper.c
/*
 *  PowerPC memory access emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "exec/helper-proto.h"
#include "helper_regs.h"
#include "exec/cpu_ldst.h"
#include "internal.h"
#include "qemu/atomic128.h"

/* #define DEBUG_OP */

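/*
 * True when a guest access must be byteswapped relative to the host
 * accessors used below: MSR[LE] set on a big-endian target build, or
 * MSR[LE] clear on a little-endian target build.
 */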
static inline bool needs_byteswap(const CPUPPCState *env)
{
#if TARGET_BIG_ENDIAN
    return FIELD_EX64(env->msr, MSR, LE);
#else
    return !FIELD_EX64(env->msr, MSR, LE);
#endif
}

/*****************************************************************************/
/* Memory load and stores */

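/*
 * Advance an effective address, wrapping the result to 32 bits when the
 * CPU is not in 64-bit mode, so that successive accesses of a multi-word
 * instruction stay within the 32-bit address space.
 */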
static inline target_ulong addr_add(CPUPPCState *env, target_ulong addr,
                                    target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_is_64bit(env, env->msr)) {
        return (uint32_t)(addr + arg);
    } else
#endif
    {
        return addr + arg;
    }
}

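/*
 * Probe a range of nb bytes for access_type; the range may span at most
 * two pages.  Returns a host pointer usable for the whole range when both
 * pages are RAM and happen to be contiguous in host memory; returns NULL
 * otherwise, in which case callers must fall back to per-element
 * cpu_*_mmuidx_ra() accesses.
 */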
static void *probe_contiguous(CPUPPCState *env, target_ulong addr, uint32_t nb,
                              MMUAccessType access_type, int mmu_idx,
                              uintptr_t raddr)
{
    void *host1, *host2;
    uint32_t nb_pg1, nb_pg2;

    nb_pg1 = -(addr | TARGET_PAGE_MASK);
    if (likely(nb <= nb_pg1)) {
        /* The entire operation is on a single page.  */
        return probe_access(env, addr, nb, access_type, mmu_idx, raddr);
    }

    /* The operation spans two pages.  */
    nb_pg2 = nb - nb_pg1;
    host1 = probe_access(env, addr, nb_pg1, access_type, mmu_idx, raddr);
    addr = addr_add(env, addr, nb_pg1);
    host2 = probe_access(env, addr, nb_pg2, access_type, mmu_idx, raddr);

    /* If the two host pages are contiguous, optimize.  */
    if (host2 == host1 + nb_pg1) {
        return host1;
    }
    return NULL;
}

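/*
 * Load/store multiple word: lmw/stmw transfer GPRs reg..r31 to or from
 * consecutive 32-bit big-endian words in memory.  For example,
 * "lmw r29, 8(r1)" loads r29, r30 and r31 from r1+8, r1+12 and r1+16.
 * The fast path requires the whole range to be contiguous host RAM
 * (see probe_contiguous above).
 */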
void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    uintptr_t raddr = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    void *host = probe_contiguous(env, addr, (32 - reg) * 4,
                                  MMU_DATA_LOAD, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host.  */
        for (; reg < 32; reg++) {
            env->gpr[reg] = (uint32_t)ldl_be_p(host);
            host += 4;
        }
    } else {
        /* Slow path -- at least some of the operation requires i/o.  */
        for (; reg < 32; reg++) {
            env->gpr[reg] = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, raddr);
            addr = addr_add(env, addr, 4);
        }
    }
}

void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    uintptr_t raddr = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    void *host = probe_contiguous(env, addr, (32 - reg) * 4,
                                  MMU_DATA_STORE, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host.  */
        for (; reg < 32; reg++) {
            stl_be_p(host, env->gpr[reg]);
            host += 4;
        }
    } else {
        /* Slow path -- at least some of the operation requires i/o.  */
        for (; reg < 32; reg++) {
            cpu_stl_mmuidx_ra(env, addr, env->gpr[reg], mmu_idx, raddr);
            addr = addr_add(env, addr, 4);
        }
    }
}

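/*
 * Load string word (lswi/lswx): copy nb bytes from memory into successive
 * GPRs starting at reg, four bytes per register, wrapping from r31 back
 * to r0.  A final partial word is placed left-justified in the last
 * register with the remaining low-order bytes zeroed.  For example, with
 * nb = 6 and reg = 5, r5 receives bytes 0-3 and r6 receives bytes 4-5 in
 * its two most-significant byte positions.
 */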
static void do_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                   uint32_t reg, uintptr_t raddr)
{
    int mmu_idx;
    void *host;
    uint32_t val;

    if (unlikely(nb == 0)) {
        return;
    }

    mmu_idx = cpu_mmu_index(env, false);
    host = probe_contiguous(env, addr, nb, MMU_DATA_LOAD, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host.  */
        for (; nb > 3; nb -= 4) {
            env->gpr[reg] = (uint32_t)ldl_be_p(host);
            reg = (reg + 1) % 32;
            host += 4;
        }
        switch (nb) {
        default:
            return;
        case 1:
            val = ldub_p(host) << 24;
            break;
        case 2:
            val = lduw_be_p(host) << 16;
            break;
        case 3:
            val = (lduw_be_p(host) << 16) | (ldub_p(host + 2) << 8);
            break;
        }
    } else {
        /* Slow path -- at least some of the operation requires i/o.  */
        for (; nb > 3; nb -= 4) {
            env->gpr[reg] = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, raddr);
            reg = (reg + 1) % 32;
            addr = addr_add(env, addr, 4);
        }
        switch (nb) {
        default:
            return;
        case 1:
            val = cpu_ldub_mmuidx_ra(env, addr, mmu_idx, raddr) << 24;
            break;
        case 2:
            val = cpu_lduw_mmuidx_ra(env, addr, mmu_idx, raddr) << 16;
            break;
        case 3:
            val = cpu_lduw_mmuidx_ra(env, addr, mmu_idx, raddr) << 16;
            addr = addr_add(env, addr, 2);
            val |= cpu_ldub_mmuidx_ra(env, addr, mmu_idx, raddr) << 8;
            break;
        }
    }
    env->gpr[reg] = val;
}

void helper_lsw(CPUPPCState *env, target_ulong addr,
                uint32_t nb, uint32_t reg)
{
    do_lsw(env, addr, nb, reg, GETPC());
}

/*
 * The PPC32 specification says we must generate an exception if rA is in
 * the range of registers to be loaded.  On the other hand, IBM says
 * this is valid, but rA won't be loaded.  For now, I'll follow the
 * spec...
 */
void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                 uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        int num_used_regs = DIV_ROUND_UP(xer_bc, 4);
        if (unlikely((ra != 0 && lsw_reg_in_range(reg, num_used_regs, ra)) ||
                     lsw_reg_in_range(reg, num_used_regs, rb))) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_LSWX, GETPC());
        } else {
            do_lsw(env, addr, xer_bc, reg, GETPC());
        }
    }
}

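/*
 * Store string word (stswi/stswx): the mirror image of do_lsw above.
 * The final 1-3 bytes are taken from the most-significant end of the
 * last register.
 */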
void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                 uint32_t reg)
{
    uintptr_t raddr = GETPC();
    int mmu_idx;
    void *host;
    uint32_t val;

    if (unlikely(nb == 0)) {
        return;
    }

    mmu_idx = cpu_mmu_index(env, false);
    host = probe_contiguous(env, addr, nb, MMU_DATA_STORE, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host.  */
        for (; nb > 3; nb -= 4) {
            stl_be_p(host, env->gpr[reg]);
            reg = (reg + 1) % 32;
            host += 4;
        }
        val = env->gpr[reg];
        switch (nb) {
        case 1:
            stb_p(host, val >> 24);
            break;
        case 2:
            stw_be_p(host, val >> 16);
            break;
        case 3:
            stw_be_p(host, val >> 16);
            stb_p(host + 2, val >> 8);
            break;
        }
    } else {
        for (; nb > 3; nb -= 4) {
            cpu_stl_mmuidx_ra(env, addr, env->gpr[reg], mmu_idx, raddr);
            reg = (reg + 1) % 32;
            addr = addr_add(env, addr, 4);
        }
        val = env->gpr[reg];
        switch (nb) {
        case 1:
            cpu_stb_mmuidx_ra(env, addr, val >> 24, mmu_idx, raddr);
            break;
        case 2:
            cpu_stw_mmuidx_ra(env, addr, val >> 16, mmu_idx, raddr);
            break;
        case 3:
            cpu_stw_mmuidx_ra(env, addr, val >> 16, mmu_idx, raddr);
            addr = addr_add(env, addr, 2);
            cpu_stb_mmuidx_ra(env, addr, val >> 8, mmu_idx, raddr);
            break;
        }
    }
}

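/*
 * dcbz: zero one data cache block.  The effective address is aligned down
 * to the block size, any matching load-and-reserve reservation is
 * cancelled, and the block is cleared either with a host memset() (fast
 * path) or with 8-byte guest stores (slow path).  The epid variant uses
 * the external-PID store context instead of the current MMU index.
 */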
static void dcbz_common(CPUPPCState *env, target_ulong addr,
                        uint32_t opcode, bool epid, uintptr_t retaddr)
{
    target_ulong mask, dcbz_size = env->dcache_line_size;
    uint32_t i;
    void *haddr;
    int mmu_idx = epid ? PPC_TLB_EPID_STORE : cpu_mmu_index(env, false);

#if defined(TARGET_PPC64)
    /* Check for dcbz vs dcbzl on 970 */
    if (env->excp_model == POWERPC_EXCP_970 &&
        !(opcode & 0x00200000) && ((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
        dcbz_size = 32;
    }
#endif

    /* Align address */
    mask = ~(dcbz_size - 1);
    addr &= mask;

    /* Check reservation */
    if ((env->reserve_addr & mask) == addr) {
        env->reserve_addr = (target_ulong)-1ULL;
    }

    /* Try fast path translate */
    haddr = probe_write(env, addr, dcbz_size, mmu_idx, retaddr);
    if (haddr) {
        memset(haddr, 0, dcbz_size);
    } else {
        /* Slow path */
        for (i = 0; i < dcbz_size; i += 8) {
            cpu_stq_mmuidx_ra(env, addr + i, 0, mmu_idx, retaddr);
        }
    }
}

void helper_dcbz(CPUPPCState *env, target_ulong addr, uint32_t opcode)
{
    dcbz_common(env, addr, opcode, false, GETPC());
}

void helper_dcbzep(CPUPPCState *env, target_ulong addr, uint32_t opcode)
{
    dcbz_common(env, addr, opcode, true, GETPC());
}

void helper_icbi(CPUPPCState *env, target_ulong addr)
{
    addr &= ~(env->dcache_line_size - 1);
    /*
     * Invalidate one cache line:
     * the PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    cpu_ldl_data_ra(env, addr, GETPC());
}

void helper_icbiep(CPUPPCState *env, target_ulong addr)
{
#if !defined(CONFIG_USER_ONLY)
    /* See comments above */
    addr &= ~(env->dcache_line_size - 1);
    cpu_ldl_mmuidx_ra(env, addr, PPC_TLB_EPID_LOAD, GETPC());
#endif
}

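/*
 * lscbx: POWER-era "load string and compare byte indexed".  Loads up to
 * xer_bc bytes into successive registers (wrapping at r31), stopping
 * once a byte equal to xer_cmp has been transferred.  Registers ra
 * (when non-zero) and rb are never overwritten.
 */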
/* XXX: to be tested */
target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                          uint32_t ra, uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = cpu_ldub_data_ra(env, addr, GETPC());
        addr = addr_add(env, addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp)) {
            break;
        }
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}

#ifdef TARGET_PPC64
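/*
 * 128-bit lq/stq helpers used when vCPUs run in parallel: the quadword
 * must be accessed with a single host 128-bit atomic operation.  The
 * translator only calls these helpers when HAVE_ATOMIC128 (or
 * HAVE_CMPXCHG128 below) holds; otherwise it raises EXCP_ATOMIC and the
 * access is replayed in an exclusive, serialised context.
 */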
uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr,
                               uint32_t opidx)
{
    Int128 ret;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_ATOMIC128);
    ret = cpu_atomic_ldo_le_mmu(env, addr, opidx, GETPC());
    env->retxh = int128_gethi(ret);
    return int128_getlo(ret);
}

uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr,
                               uint32_t opidx)
{
    Int128 ret;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_ATOMIC128);
    ret = cpu_atomic_ldo_be_mmu(env, addr, opidx, GETPC());
    env->retxh = int128_gethi(ret);
    return int128_getlo(ret);
}

void helper_stq_le_parallel(CPUPPCState *env, target_ulong addr,
                            uint64_t lo, uint64_t hi, uint32_t opidx)
{
    Int128 val;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_ATOMIC128);
    val = int128_make128(lo, hi);
    cpu_atomic_sto_le_mmu(env, addr, val, opidx, GETPC());
}

void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr,
                            uint64_t lo, uint64_t hi, uint32_t opidx)
{
    Int128 val;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_ATOMIC128);
    val = int128_make128(lo, hi);
    cpu_atomic_sto_be_mmu(env, addr, val, opidx, GETPC());
}

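/*
 * stqcx.: quadword store conditional.  The store succeeds only if the
 * reservation established by lqarx is still held and the 128-bit value
 * at the reserved address still equals the reserved value, checked here
 * with a host compare-and-swap.  The return value is the CR0 field:
 * EQ set on success, with XER[SO] copied in.
 */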
uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr,
                                  uint64_t new_lo, uint64_t new_hi,
                                  uint32_t opidx)
{
    bool success = false;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_CMPXCHG128);

    if (likely(addr == env->reserve_addr)) {
        Int128 oldv, cmpv, newv;

        cmpv = int128_make128(env->reserve_val2, env->reserve_val);
        newv = int128_make128(new_lo, new_hi);
        oldv = cpu_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv,
                                          opidx, GETPC());
        success = int128_eq(oldv, cmpv);
    }
    env->reserve_addr = -1;
    return env->so + success * CRF_EQ_BIT;
}

uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
                                  uint64_t new_lo, uint64_t new_hi,
                                  uint32_t opidx)
{
    bool success = false;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_CMPXCHG128);

    if (likely(addr == env->reserve_addr)) {
        Int128 oldv, cmpv, newv;

        cmpv = int128_make128(env->reserve_val2, env->reserve_val);
        newv = int128_make128(new_lo, new_hi);
        oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv,
                                          opidx, GETPC());
        success = int128_eq(oldv, cmpv);
    }
    env->reserve_addr = -1;
    return env->so + success * CRF_EQ_BIT;
}
#endif

/*****************************************************************************/
/* Altivec extension helpers */
#if HOST_BIG_ENDIAN
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/*
 * We use MSR_LE to determine index ordering in a vector.  However,
 * byteswapping is not simply controlled by MSR_LE.  We also need to
 * take into account endianness of the target.  This is done for the
 * little-endian PPC64 user-mode target.
 */

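/*
 * lvebx/lvehx/lvewx and stvebx/stvehx/stvewx access a single vector
 * element whose position is derived from the low four bits of the
 * effective address; MSR[LE] reverses the element numbering, and
 * needs_byteswap() decides whether the element value itself must be
 * swapped.
 */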
#define LVE(name, access, swap, element)                        \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,          \
                       target_ulong addr)                       \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX * (n_elems - 1);                    \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if (FIELD_EX64(env->msr, MSR, LE)) {                    \
            index = n_elems - index - 1;                        \
        }                                                       \
                                                                \
        if (needs_byteswap(env)) {                              \
            r->element[LO_IDX ? index : (adjust - index)] =     \
                swap(access(env, addr, GETPC()));               \
        } else {                                                \
            r->element[LO_IDX ? index : (adjust - index)] =     \
                access(env, addr, GETPC());                     \
        }                                                       \
    }
#define I(x) (x)
LVE(lvebx, cpu_ldub_data_ra, I, u8)
LVE(lvehx, cpu_lduw_data_ra, bswap16, u16)
LVE(lvewx, cpu_ldl_data_ra, bswap32, u32)
#undef I
#undef LVE

#define STVE(name, access, swap, element)                               \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,                  \
                       target_ulong addr)                               \
    {                                                                   \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        int adjust = HI_IDX * (n_elems - 1);                            \
        int sh = sizeof(r->element[0]) >> 1;                            \
        int index = (addr & 0xf) >> sh;                                 \
        if (FIELD_EX64(env->msr, MSR, LE)) {                            \
            index = n_elems - index - 1;                                \
        }                                                               \
                                                                        \
        if (needs_byteswap(env)) {                                      \
            access(env, addr, swap(r->element[LO_IDX ? index :          \
                                              (adjust - index)]),       \
                        GETPC());                                       \
        } else {                                                        \
            access(env, addr, r->element[LO_IDX ? index :               \
                                         (adjust - index)], GETPC());   \
        }                                                               \
    }
#define I(x) (x)
STVE(stvebx, cpu_stb_data_ra, I, u8)
STVE(stvehx, cpu_stw_data_ra, bswap16, u16)
STVE(stvewx, cpu_stl_data_ra, bswap32, u32)
#undef I
#undef STVE

#ifdef TARGET_PPC64
#define GET_NB(rb) ((rb >> 56) & 0xFF)

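/*
 * lxvl/stxvl transfer a variable number of bytes of a VSX register: the
 * length is taken from the high byte of rb (GET_NB above), capped at 16.
 * The "ll" (left-justified) forms always fill from byte 0 of the
 * register, while the plain forms honour MSR[LE] byte ordering.
 */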
#define VSX_LXVL(name, lj)                                              \
void helper_##name(CPUPPCState *env, target_ulong addr,                 \
                   ppc_vsr_t *xt, target_ulong rb)                      \
{                                                                       \
    ppc_vsr_t t;                                                        \
    uint64_t nb = GET_NB(rb);                                           \
    int i;                                                              \
                                                                        \
    t.s128 = int128_zero();                                             \
    if (nb) {                                                           \
        nb = (nb >= 16) ? 16 : nb;                                      \
        if (FIELD_EX64(env->msr, MSR, LE) && !lj) {                     \
            for (i = 16; i > 16 - nb; i--) {                            \
                t.VsrB(i - 1) = cpu_ldub_data_ra(env, addr, GETPC());   \
                addr = addr_add(env, addr, 1);                          \
            }                                                           \
        } else {                                                        \
            for (i = 0; i < nb; i++) {                                  \
                t.VsrB(i) = cpu_ldub_data_ra(env, addr, GETPC());       \
                addr = addr_add(env, addr, 1);                          \
            }                                                           \
        }                                                               \
    }                                                                   \
    *xt = t;                                                            \
}

VSX_LXVL(lxvl, 0)
VSX_LXVL(lxvll, 1)
#undef VSX_LXVL

#define VSX_STXVL(name, lj)                                       \
void helper_##name(CPUPPCState *env, target_ulong addr,           \
                   ppc_vsr_t *xt, target_ulong rb)                \
{                                                                 \
    target_ulong nb = GET_NB(rb);                                 \
    int i;                                                        \
                                                                  \
    if (!nb) {                                                    \
        return;                                                   \
    }                                                             \
                                                                  \
    nb = (nb >= 16) ? 16 : nb;                                    \
    if (FIELD_EX64(env->msr, MSR, LE) && !lj) {                   \
        for (i = 16; i > 16 - nb; i--) {                          \
            cpu_stb_data_ra(env, addr, xt->VsrB(i - 1), GETPC()); \
            addr = addr_add(env, addr, 1);                        \
        }                                                         \
    } else {                                                      \
        for (i = 0; i < nb; i++) {                                \
            cpu_stb_data_ra(env, addr, xt->VsrB(i), GETPC());     \
            addr = addr_add(env, addr, 1);                        \
        }                                                         \
    }                                                             \
}

VSX_STXVL(stxvl, 0)
VSX_STXVL(stxvll, 1)
#undef VSX_STXVL
#undef GET_NB
#endif /* TARGET_PPC64 */

#undef HI_IDX
#undef LO_IDX

void helper_tbegin(CPUPPCState *env)
{
    /*
     * As a degenerate implementation, always fail tbegin.  The reason
     * given is "Nesting overflow".  The "persistent" bit is set,
     * providing a hint to the error handler to not retry.  The TFIAR
     * captures the address of the failure, which is this tbegin
     * instruction.  Instruction execution will continue with the next
     * instruction in memory, which is precisely what we want.
     */

    env->spr[SPR_TEXASR] =
        (1ULL << TEXASR_FAILURE_PERSISTENT) |
        (1ULL << TEXASR_NESTING_OVERFLOW) |
        (FIELD_EX64_HV(env->msr) << TEXASR_PRIVILEGE_HV) |
        (FIELD_EX64(env->msr, MSR, PR) << TEXASR_PRIVILEGE_PR) |
        (1ULL << TEXASR_FAILURE_SUMMARY) |
        (1ULL << TEXASR_TFIAR_EXACT);
    env->spr[SPR_TFIAR] = env->nip | (FIELD_EX64_HV(env->msr) << 1) |
                          FIELD_EX64(env->msr, MSR, PR);
    env->spr[SPR_TFHAR] = env->nip + 4;
    env->crf[0] = 0xA; /* 0b1010 = transaction failure */
}