qemu/target/mips/op_helper.c
   1/*
   2 *  MIPS emulation helpers for qemu.
   3 *
   4 *  Copyright (c) 2004-2005 Jocelyn Mayer
   5 *
   6 * This library is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU Lesser General Public
   8 * License as published by the Free Software Foundation; either
   9 * version 2 of the License, or (at your option) any later version.
  10 *
  11 * This library is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14 * Lesser General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU Lesser General Public
  17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  18 *
  19 */
  20
  21#include "qemu/osdep.h"
  22#include "qemu/main-loop.h"
  23#include "cpu.h"
  24#include "internal.h"
  25#include "qemu/host-utils.h"
  26#include "exec/helper-proto.h"
  27#include "exec/exec-all.h"
  28#include "exec/cpu_ldst.h"
  29#include "exec/memop.h"
  30#include "sysemu/kvm.h"
  31
  32
  33/*****************************************************************************/
  34/* Exceptions processing helpers */
  35
  36void helper_raise_exception_err(CPUMIPSState *env, uint32_t exception,
  37                                int error_code)
  38{
  39    do_raise_exception_err(env, exception, error_code, 0);
  40}
  41
  42void helper_raise_exception(CPUMIPSState *env, uint32_t exception)
  43{
  44    do_raise_exception(env, exception, GETPC());
  45}
  46
  47void helper_raise_exception_debug(CPUMIPSState *env)
  48{
  49    do_raise_exception(env, EXCP_DEBUG, 0);
  50}
  51
  52static void raise_exception(CPUMIPSState *env, uint32_t exception)
  53{
  54    do_raise_exception(env, exception, 0);
  55}
  56
  57/* 64-bit arithmetic for 32-bit hosts */
  58static inline uint64_t get_HILO(CPUMIPSState *env)
  59{
  60    return ((uint64_t)(env->active_tc.HI[0]) << 32) |
  61           (uint32_t)env->active_tc.LO[0];
  62}
  63
  64static inline target_ulong set_HIT0_LO(CPUMIPSState *env, uint64_t HILO)
  65{
  66    env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
  67    return env->active_tc.HI[0] = (int32_t)(HILO >> 32);
  68}
  69
  70static inline target_ulong set_HI_LOT0(CPUMIPSState *env, uint64_t HILO)
  71{
  72    target_ulong tmp = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
  73    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
  74    return tmp;
  75}
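/*
 * Added illustrative note: with HILO = 0x1122334455667788ULL, both helpers
 * above leave HI[0] = sign_extend32(0x11223344) and
 * LO[0] = sign_extend32(0x55667788); set_HIT0_LO returns the HI half,
 * set_HI_LOT0 the LO half.
 */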
  76
  77/* Multiplication variants of the vr54xx. */
  78target_ulong helper_muls(CPUMIPSState *env, target_ulong arg1,
  79                         target_ulong arg2)
  80{
  81    return set_HI_LOT0(env, 0 - ((int64_t)(int32_t)arg1 *
  82                                 (int64_t)(int32_t)arg2));
  83}
  84
  85target_ulong helper_mulsu(CPUMIPSState *env, target_ulong arg1,
  86                          target_ulong arg2)
  87{
  88    return set_HI_LOT0(env, 0 - (uint64_t)(uint32_t)arg1 *
  89                       (uint64_t)(uint32_t)arg2);
  90}
  91
  92target_ulong helper_macc(CPUMIPSState *env, target_ulong arg1,
  93                         target_ulong arg2)
  94{
  95    return set_HI_LOT0(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
  96                       (int64_t)(int32_t)arg2);
  97}
  98
  99target_ulong helper_macchi(CPUMIPSState *env, target_ulong arg1,
 100                           target_ulong arg2)
 101{
 102    return set_HIT0_LO(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
 103                       (int64_t)(int32_t)arg2);
 104}
 105
 106target_ulong helper_maccu(CPUMIPSState *env, target_ulong arg1,
 107                          target_ulong arg2)
 108{
 109    return set_HI_LOT0(env, (uint64_t)get_HILO(env) +
 110                       (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
 111}
 112
 113target_ulong helper_macchiu(CPUMIPSState *env, target_ulong arg1,
 114                            target_ulong arg2)
 115{
 116    return set_HIT0_LO(env, (uint64_t)get_HILO(env) +
 117                       (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
 118}
 119
 120target_ulong helper_msac(CPUMIPSState *env, target_ulong arg1,
 121                         target_ulong arg2)
 122{
 123    return set_HI_LOT0(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
 124                       (int64_t)(int32_t)arg2);
 125}
 126
 127target_ulong helper_msachi(CPUMIPSState *env, target_ulong arg1,
 128                           target_ulong arg2)
 129{
 130    return set_HIT0_LO(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
 131                       (int64_t)(int32_t)arg2);
 132}
 133
 134target_ulong helper_msacu(CPUMIPSState *env, target_ulong arg1,
 135                          target_ulong arg2)
 136{
 137    return set_HI_LOT0(env, (uint64_t)get_HILO(env) -
 138                       (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
 139}
 140
 141target_ulong helper_msachiu(CPUMIPSState *env, target_ulong arg1,
 142                            target_ulong arg2)
 143{
 144    return set_HIT0_LO(env, (uint64_t)get_HILO(env) -
 145                       (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
 146}
 147
 148target_ulong helper_mulhi(CPUMIPSState *env, target_ulong arg1,
 149                          target_ulong arg2)
 150{
 151    return set_HIT0_LO(env, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
 152}
 153
 154target_ulong helper_mulhiu(CPUMIPSState *env, target_ulong arg1,
 155                           target_ulong arg2)
 156{
 157    return set_HIT0_LO(env, (uint64_t)(uint32_t)arg1 *
 158                       (uint64_t)(uint32_t)arg2);
 159}
 160
 161target_ulong helper_mulshi(CPUMIPSState *env, target_ulong arg1,
 162                           target_ulong arg2)
 163{
 164    return set_HIT0_LO(env, 0 - (int64_t)(int32_t)arg1 *
 165                       (int64_t)(int32_t)arg2);
 166}
 167
 168target_ulong helper_mulshiu(CPUMIPSState *env, target_ulong arg1,
 169                            target_ulong arg2)
 170{
 171    return set_HIT0_LO(env, 0 - (uint64_t)(uint32_t)arg1 *
 172                       (uint64_t)(uint32_t)arg2);
 173}
 174
 175static inline target_ulong bitswap(target_ulong v)
 176{
 177    v = ((v >> 1) & (target_ulong)0x5555555555555555ULL) |
 178              ((v & (target_ulong)0x5555555555555555ULL) << 1);
 179    v = ((v >> 2) & (target_ulong)0x3333333333333333ULL) |
 180              ((v & (target_ulong)0x3333333333333333ULL) << 2);
 181    v = ((v >> 4) & (target_ulong)0x0F0F0F0F0F0F0F0FULL) |
 182              ((v & (target_ulong)0x0F0F0F0F0F0F0F0FULL) << 4);
 183    return v;
 184}
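/*
 * Added illustrative note: the three swap steps above reverse the bit
 * order within each byte while leaving the byte order untouched, e.g.
 * bitswap(0x01) == 0x80 and bitswap(0x0103) == 0x80c0.
 */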
 185
 186#ifdef TARGET_MIPS64
 187target_ulong helper_dbitswap(target_ulong rt)
 188{
 189    return bitswap(rt);
 190}
 191#endif
 192
 193target_ulong helper_bitswap(target_ulong rt)
 194{
 195    return (int32_t)bitswap(rt);
 196}
 197
 198target_ulong helper_rotx(target_ulong rs, uint32_t shift, uint32_t shiftx,
 199                        uint32_t stripe)
 200{
 201    int i;
 202    uint64_t tmp0 = ((uint64_t)rs) << 32 | ((uint64_t)rs & 0xffffffff);
 203    uint64_t tmp1 = tmp0;
 204    for (i = 0; i <= 46; i++) {
 205        int s;
 206        if (i & 0x8) {
 207            s = shift;
 208        } else {
 209            s = shiftx;
 210        }
 211
 212        if (stripe != 0 && !(i & 0x4)) {
 213            s = ~s;
 214        }
 215        if (s & 0x10) {
 216            if (tmp0 & (1LL << (i + 16))) {
 217                tmp1 |= 1LL << i;
 218            } else {
 219                tmp1 &= ~(1LL << i);
 220            }
 221        }
 222    }
 223
 224    uint64_t tmp2 = tmp1;
 225    for (i = 0; i <= 38; i++) {
 226        int s;
 227        if (i & 0x4) {
 228            s = shift;
 229        } else {
 230            s = shiftx;
 231        }
 232
 233        if (s & 0x8) {
 234            if (tmp1 & (1LL << (i + 8))) {
 235                tmp2 |= 1LL << i;
 236            } else {
 237                tmp2 &= ~(1LL << i);
 238            }
 239        }
 240    }
 241
 242    uint64_t tmp3 = tmp2;
 243    for (i = 0; i <= 34; i++) {
 244        int s;
 245        if (i & 0x2) {
 246            s = shift;
 247        } else {
 248            s = shiftx;
 249        }
 250        if (s & 0x4) {
 251            if (tmp2 & (1LL << (i + 4))) {
 252                tmp3 |= 1LL << i;
 253            } else {
 254                tmp3 &= ~(1LL << i);
 255            }
 256        }
 257    }
 258
 259    uint64_t tmp4 = tmp3;
 260    for (i = 0; i <= 32; i++) {
 261        int s;
 262        if (i & 0x1) {
 263            s = shift;
 264        } else {
 265            s = shiftx;
 266        }
 267        if (s & 0x2) {
 268            if (tmp3 & (1LL << (i + 2))) {
 269                tmp4 |= 1LL << i;
 270            } else {
 271                tmp4 &= ~(1LL << i);
 272            }
 273        }
 274    }
 275
 276    uint64_t tmp5 = tmp4;
 277    for (i = 0; i <= 31; i++) {
 278        int s;
 279        s = shift;
 280        if (s & 0x1) {
 281            if (tmp4 & (1LL << (i + 1))) {
 282                tmp5 |= 1LL << i;
 283            } else {
 284                tmp5 &= ~(1LL << i);
 285            }
 286        }
 287    }
 288
 289    return (int64_t)(int32_t)(uint32_t)tmp5;
 290}
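/*
 * Added descriptive note: the five passes above form a conditional shift
 * network.  Bit 4 of the selected shift amount controls a 16-bit move,
 * bit 3 an 8-bit move, and so on down to bit 0 for a 1-bit move; the
 * final result is sign-extended from the low 32 bits.
 */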
 291
 292#ifndef CONFIG_USER_ONLY
 293
 294static inline hwaddr do_translate_address(CPUMIPSState *env,
 295                                                      target_ulong address,
 296                                                      int rw, uintptr_t retaddr)
 297{
 298    hwaddr paddr;
 299    CPUState *cs = env_cpu(env);
 300
 301    paddr = cpu_mips_translate_address(env, address, rw);
 302
 303    if (paddr == -1LL) {
 304        cpu_loop_exit_restore(cs, retaddr);
 305    } else {
 306        return paddr;
 307    }
 308}
 309
 310#define HELPER_LD_ATOMIC(name, insn, almask, do_cast)                         \
 311target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx)  \
 312{                                                                             \
 313    if (arg & almask) {                                                       \
 314        if (!(env->hflags & MIPS_HFLAG_DM)) {                                 \
 315            env->CP0_BadVAddr = arg;                                          \
 316        }                                                                     \
 317        do_raise_exception(env, EXCP_AdEL, GETPC());                          \
 318    }                                                                         \
 319    env->CP0_LLAddr = do_translate_address(env, arg, 0, GETPC());             \
 320    env->lladdr = arg;                                                        \
 321    env->llval = do_cast cpu_##insn##_mmuidx_ra(env, arg, mem_idx, GETPC());  \
 322    return env->llval;                                                        \
 323}
 324HELPER_LD_ATOMIC(ll, ldl, 0x3, (target_long)(int32_t))
 325#ifdef TARGET_MIPS64
 326HELPER_LD_ATOMIC(lld, ldq, 0x7, (target_ulong))
 327#endif
 328#undef HELPER_LD_ATOMIC
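/*
 * Added descriptive note: the expansions above implement LL/LLD.  They
 * record the translated physical address in CP0_LLAddr, the virtual
 * address in env->lladdr and the loaded value in env->llval; a misaligned
 * address raises AdEL first.  The matching SC/SCD emulation (elsewhere in
 * the target code) consults these fields to decide whether the
 * conditional store succeeds.
 */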
 329#endif
 330
 331#ifdef TARGET_WORDS_BIGENDIAN
 332#define GET_LMASK(v) ((v) & 3)
 333#define GET_OFFSET(addr, offset) (addr + (offset))
 334#else
 335#define GET_LMASK(v) (((v) & 3) ^ 3)
 336#define GET_OFFSET(addr, offset) (addr - (offset))
 337#endif
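/*
 * Added illustrative note: GET_LMASK() yields the byte lane of the address
 * within its aligned word (addr & 3 on big-endian targets, its complement
 * on little-endian ones).  For example, on a big-endian target with
 * (arg2 & 3) == 2, helper_swl below stores arg1 >> 24 at arg2 and
 * arg1 >> 16 at arg2 + 1, i.e. only the two most-significant bytes.
 */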
 338
 339void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
 340                int mem_idx)
 341{
 342    cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 24), mem_idx, GETPC());
 343
 344    if (GET_LMASK(arg2) <= 2) {
 345        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16),
 346                          mem_idx, GETPC());
 347    }
 348
 349    if (GET_LMASK(arg2) <= 1) {
 350        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8),
 351                          mem_idx, GETPC());
 352    }
 353
 354    if (GET_LMASK(arg2) == 0) {
 355        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 3), (uint8_t)arg1,
 356                          mem_idx, GETPC());
 357    }
 358}
 359
 360void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
 361                int mem_idx)
 362{
 363    cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC());
 364
 365    if (GET_LMASK(arg2) >= 1) {
 366        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8),
 367                          mem_idx, GETPC());
 368    }
 369
 370    if (GET_LMASK(arg2) >= 2) {
 371        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16),
 372                          mem_idx, GETPC());
 373    }
 374
 375    if (GET_LMASK(arg2) == 3) {
 376        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24),
 377                          mem_idx, GETPC());
 378    }
 379}
 380
 381#if defined(TARGET_MIPS64)
 382/*
  383 * "half" loads and stores.  We must do the memory access inline,
 384 * or fault handling won't work.
 385 */
 386#ifdef TARGET_WORDS_BIGENDIAN
 387#define GET_LMASK64(v) ((v) & 7)
 388#else
 389#define GET_LMASK64(v) (((v) & 7) ^ 7)
 390#endif
 391
 392void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
 393                int mem_idx)
 394{
 395    cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 56), mem_idx, GETPC());
 396
 397    if (GET_LMASK64(arg2) <= 6) {
 398        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48),
 399                          mem_idx, GETPC());
 400    }
 401
 402    if (GET_LMASK64(arg2) <= 5) {
 403        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40),
 404                          mem_idx, GETPC());
 405    }
 406
 407    if (GET_LMASK64(arg2) <= 4) {
 408        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32),
 409                          mem_idx, GETPC());
 410    }
 411
 412    if (GET_LMASK64(arg2) <= 3) {
 413        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24),
 414                          mem_idx, GETPC());
 415    }
 416
 417    if (GET_LMASK64(arg2) <= 2) {
 418        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16),
 419                          mem_idx, GETPC());
 420    }
 421
 422    if (GET_LMASK64(arg2) <= 1) {
 423        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8),
 424                          mem_idx, GETPC());
 425    }
 426
 427    if (GET_LMASK64(arg2) <= 0) {
 428        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 7), (uint8_t)arg1,
 429                          mem_idx, GETPC());
 430    }
 431}
 432
 433void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
 434                int mem_idx)
 435{
 436    cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC());
 437
 438    if (GET_LMASK64(arg2) >= 1) {
 439        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8),
 440                          mem_idx, GETPC());
 441    }
 442
 443    if (GET_LMASK64(arg2) >= 2) {
 444        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16),
 445                          mem_idx, GETPC());
 446    }
 447
 448    if (GET_LMASK64(arg2) >= 3) {
 449        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24),
 450                          mem_idx, GETPC());
 451    }
 452
 453    if (GET_LMASK64(arg2) >= 4) {
 454        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32),
 455                          mem_idx, GETPC());
 456    }
 457
 458    if (GET_LMASK64(arg2) >= 5) {
 459        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40),
 460                          mem_idx, GETPC());
 461    }
 462
 463    if (GET_LMASK64(arg2) >= 6) {
 464        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48),
 465                          mem_idx, GETPC());
 466    }
 467
 468    if (GET_LMASK64(arg2) == 7) {
 469        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56),
 470                          mem_idx, GETPC());
 471    }
 472}
 473#endif /* TARGET_MIPS64 */
 474
 475static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };
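/*
 * Added descriptive note: multiple_regs lists the GPRs moved by the
 * LWM/SWM/LDM/SDM helpers below, i.e. s0-s7 (r16-r23) and s8/fp (r30);
 * the low four bits of the reglist operand give how many of them to
 * transfer and bit 4 additionally selects ra (r31).
 */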
 476
 477void helper_lwm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
 478                uint32_t mem_idx)
 479{
 480    target_ulong base_reglist = reglist & 0xf;
 481    target_ulong do_r31 = reglist & 0x10;
 482
 483    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) {
 484        target_ulong i;
 485
 486        for (i = 0; i < base_reglist; i++) {
 487            env->active_tc.gpr[multiple_regs[i]] =
 488                (target_long)cpu_ldl_mmuidx_ra(env, addr, mem_idx, GETPC());
 489            addr += 4;
 490        }
 491    }
 492
 493    if (do_r31) {
 494        env->active_tc.gpr[31] =
 495            (target_long)cpu_ldl_mmuidx_ra(env, addr, mem_idx, GETPC());
 496    }
 497}
 498
 499void helper_swm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
 500                uint32_t mem_idx)
 501{
 502    target_ulong base_reglist = reglist & 0xf;
 503    target_ulong do_r31 = reglist & 0x10;
 504
 505    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) {
 506        target_ulong i;
 507
 508        for (i = 0; i < base_reglist; i++) {
 509            cpu_stl_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]],
 510                              mem_idx, GETPC());
 511            addr += 4;
 512        }
 513    }
 514
 515    if (do_r31) {
 516        cpu_stl_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
 517    }
 518}
 519
 520#if defined(TARGET_MIPS64)
 521void helper_ldm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
 522                uint32_t mem_idx)
 523{
 524    target_ulong base_reglist = reglist & 0xf;
 525    target_ulong do_r31 = reglist & 0x10;
 526
 527    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) {
 528        target_ulong i;
 529
 530        for (i = 0; i < base_reglist; i++) {
 531            env->active_tc.gpr[multiple_regs[i]] =
 532                cpu_ldq_mmuidx_ra(env, addr, mem_idx, GETPC());
 533            addr += 8;
 534        }
 535    }
 536
 537    if (do_r31) {
 538        env->active_tc.gpr[31] =
 539            cpu_ldq_mmuidx_ra(env, addr, mem_idx, GETPC());
 540    }
 541}
 542
 543void helper_sdm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
 544                uint32_t mem_idx)
 545{
 546    target_ulong base_reglist = reglist & 0xf;
 547    target_ulong do_r31 = reglist & 0x10;
 548
 549    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) {
 550        target_ulong i;
 551
 552        for (i = 0; i < base_reglist; i++) {
 553            cpu_stq_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]],
 554                              mem_idx, GETPC());
 555            addr += 8;
 556        }
 557    }
 558
 559    if (do_r31) {
 560        cpu_stq_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
 561    }
 562}
 563#endif
 564
 565
 566void helper_fork(target_ulong arg1, target_ulong arg2)
 567{
 568    /*
 569     * arg1 = rt, arg2 = rs
 570     * TODO: store to TC register
 571     */
 572}
 573
 574target_ulong helper_yield(CPUMIPSState *env, target_ulong arg)
 575{
 576    target_long arg1 = arg;
 577
 578    if (arg1 < 0) {
 579        /* No scheduling policy implemented. */
 580        if (arg1 != -2) {
 581            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
 582                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
 583                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
 584                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
 585                do_raise_exception(env, EXCP_THREAD, GETPC());
 586            }
 587        }
 588    } else if (arg1 == 0) {
 589        if (0) {
 590            /* TODO: TC underflow */
 591            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
 592            do_raise_exception(env, EXCP_THREAD, GETPC());
 593        } else {
 594            /* TODO: Deallocate TC */
 595        }
 596    } else if (arg1 > 0) {
 597        /* Yield qualifier inputs not implemented. */
 598        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
 599        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
 600        do_raise_exception(env, EXCP_THREAD, GETPC());
 601    }
 602    return env->CP0_YQMask;
 603}
 604
 605#ifndef CONFIG_USER_ONLY
 606/* TLB management */
 607static void r4k_mips_tlb_flush_extra(CPUMIPSState *env, int first)
 608{
 609    /* Discard entries from env->tlb[first] onwards.  */
 610    while (env->tlb->tlb_in_use > first) {
 611        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
 612    }
 613}
 614
 615static inline uint64_t get_tlb_pfn_from_entrylo(uint64_t entrylo)
 616{
 617#if defined(TARGET_MIPS64)
 618    return extract64(entrylo, 6, 54);
 619#else
 620    return extract64(entrylo, 6, 24) | /* PFN */
 621           (extract64(entrylo, 32, 32) << 24); /* PFNX */
 622#endif
 623}
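/*
 * Added descriptive note: as r4k_fill_tlb() below shows, EntryLo0/1 hold
 * G in bit 0, V in bit 1, D in bit 2, the cache attribute C in bits 3..5
 * and the PFN from bit 6 upwards.  On 32-bit targets the helper above
 * stitches the PFN (bits 6..29) together with the PFNX field kept in the
 * upper word (bits 32..63).
 */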
 624
 625static void r4k_fill_tlb(CPUMIPSState *env, int idx)
 626{
 627    r4k_tlb_t *tlb;
 628    uint64_t mask = env->CP0_PageMask >> (TARGET_PAGE_BITS + 1);
 629
 630    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
 631    tlb = &env->tlb->mmu.r4k.tlb[idx];
 632    if (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) {
 633        tlb->EHINV = 1;
 634        return;
 635    }
 636    tlb->EHINV = 0;
 637    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
 638#if defined(TARGET_MIPS64)
 639    tlb->VPN &= env->SEGMask;
 640#endif
 641    tlb->ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
 642    tlb->MMID = env->CP0_MemoryMapID;
 643    tlb->PageMask = env->CP0_PageMask;
 644    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
 645    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
 646    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
 647    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
 648    tlb->XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
 649    tlb->RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
 650    tlb->PFN[0] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo0) & ~mask) << 12;
 651    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
 652    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
 653    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
 654    tlb->XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
 655    tlb->RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;
 656    tlb->PFN[1] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo1) & ~mask) << 12;
 657}
 658
 659void r4k_helper_tlbinv(CPUMIPSState *env)
 660{
 661    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
 662    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
 663    uint32_t MMID = env->CP0_MemoryMapID;
 664    uint32_t tlb_mmid;
 665    r4k_tlb_t *tlb;
 666    int idx;
 667
 668    MMID = mi ? MMID : (uint32_t) ASID;
 669    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
 670        tlb = &env->tlb->mmu.r4k.tlb[idx];
 671        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
 672        if (!tlb->G && tlb_mmid == MMID) {
 673            tlb->EHINV = 1;
 674        }
 675    }
 676    cpu_mips_tlb_flush(env);
 677}
 678
 679void r4k_helper_tlbinvf(CPUMIPSState *env)
 680{
 681    int idx;
 682
 683    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
 684        env->tlb->mmu.r4k.tlb[idx].EHINV = 1;
 685    }
 686    cpu_mips_tlb_flush(env);
 687}
 688
 689void r4k_helper_tlbwi(CPUMIPSState *env)
 690{
 691    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
 692    target_ulong VPN;
 693    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
 694    uint32_t MMID = env->CP0_MemoryMapID;
 695    uint32_t tlb_mmid;
 696    bool EHINV, G, V0, D0, V1, D1, XI0, XI1, RI0, RI1;
 697    r4k_tlb_t *tlb;
 698    int idx;
 699
 700    MMID = mi ? MMID : (uint32_t) ASID;
 701
 702    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
 703    tlb = &env->tlb->mmu.r4k.tlb[idx];
 704    VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
 705#if defined(TARGET_MIPS64)
 706    VPN &= env->SEGMask;
 707#endif
 708    EHINV = (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) != 0;
 709    G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
 710    V0 = (env->CP0_EntryLo0 & 2) != 0;
 711    D0 = (env->CP0_EntryLo0 & 4) != 0;
 712    XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
 713    RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
 714    V1 = (env->CP0_EntryLo1 & 2) != 0;
 715    D1 = (env->CP0_EntryLo1 & 4) != 0;
 716    XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
 717    RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;
 718
 719    tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
 720    /*
 721     * Discard cached TLB entries, unless tlbwi is just upgrading access
 722     * permissions on the current entry.
 723     */
 724    if (tlb->VPN != VPN || tlb_mmid != MMID || tlb->G != G ||
 725        (!tlb->EHINV && EHINV) ||
 726        (tlb->V0 && !V0) || (tlb->D0 && !D0) ||
 727        (!tlb->XI0 && XI0) || (!tlb->RI0 && RI0) ||
 728        (tlb->V1 && !V1) || (tlb->D1 && !D1) ||
 729        (!tlb->XI1 && XI1) || (!tlb->RI1 && RI1)) {
 730        r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
 731    }
 732
 733    r4k_invalidate_tlb(env, idx, 0);
 734    r4k_fill_tlb(env, idx);
 735}
 736
 737void r4k_helper_tlbwr(CPUMIPSState *env)
 738{
 739    int r = cpu_mips_get_random(env);
 740
 741    r4k_invalidate_tlb(env, r, 1);
 742    r4k_fill_tlb(env, r);
 743}
 744
 745void r4k_helper_tlbp(CPUMIPSState *env)
 746{
 747    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
 748    r4k_tlb_t *tlb;
 749    target_ulong mask;
 750    target_ulong tag;
 751    target_ulong VPN;
 752    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
 753    uint32_t MMID = env->CP0_MemoryMapID;
 754    uint32_t tlb_mmid;
 755    int i;
 756
 757    MMID = mi ? MMID : (uint32_t) ASID;
 758    for (i = 0; i < env->tlb->nb_tlb; i++) {
 759        tlb = &env->tlb->mmu.r4k.tlb[i];
 760        /* 1k pages are not supported. */
 761        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
 762        tag = env->CP0_EntryHi & ~mask;
 763        VPN = tlb->VPN & ~mask;
 764#if defined(TARGET_MIPS64)
 765        tag &= env->SEGMask;
 766#endif
 767        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
 768        /* Check ASID/MMID, virtual page number & size */
 769        if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) {
 770            /* TLB match */
 771            env->CP0_Index = i;
 772            break;
 773        }
 774    }
 775    if (i == env->tlb->nb_tlb) {
 776        /* No match.  Discard shadow entries if any of them match.  */
 777        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
 778            tlb = &env->tlb->mmu.r4k.tlb[i];
 779            /* 1k pages are not supported. */
 780            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
 781            tag = env->CP0_EntryHi & ~mask;
 782            VPN = tlb->VPN & ~mask;
 783#if defined(TARGET_MIPS64)
 784            tag &= env->SEGMask;
 785#endif
 786            tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
 787            /* Check ASID/MMID, virtual page number & size */
 788            if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag) {
 789                r4k_mips_tlb_flush_extra(env, i);
 790                break;
 791            }
 792        }
 793
 794        env->CP0_Index |= 0x80000000;
 795    }
 796}
 797
 798static inline uint64_t get_entrylo_pfn_from_tlb(uint64_t tlb_pfn)
 799{
 800#if defined(TARGET_MIPS64)
 801    return tlb_pfn << 6;
 802#else
 803    return (extract64(tlb_pfn, 0, 24) << 6) | /* PFN */
 804           (extract64(tlb_pfn, 24, 32) << 32); /* PFNX */
 805#endif
 806}
 807
 808void r4k_helper_tlbr(CPUMIPSState *env)
 809{
 810    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
 811    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
 812    uint32_t MMID = env->CP0_MemoryMapID;
 813    uint32_t tlb_mmid;
 814    r4k_tlb_t *tlb;
 815    int idx;
 816
 817    MMID = mi ? MMID : (uint32_t) ASID;
 818    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
 819    tlb = &env->tlb->mmu.r4k.tlb[idx];
 820
 821    tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
 822    /* If this will change the current ASID/MMID, flush qemu's TLB.  */
 823    if (MMID != tlb_mmid) {
 824        cpu_mips_tlb_flush(env);
 825    }
 826
 827    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
 828
 829    if (tlb->EHINV) {
 830        env->CP0_EntryHi = 1 << CP0EnHi_EHINV;
 831        env->CP0_PageMask = 0;
 832        env->CP0_EntryLo0 = 0;
 833        env->CP0_EntryLo1 = 0;
 834    } else {
 835        env->CP0_EntryHi = mi ? tlb->VPN : tlb->VPN | tlb->ASID;
 836        env->CP0_MemoryMapID = tlb->MMID;
 837        env->CP0_PageMask = tlb->PageMask;
 838        env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
 839                        ((uint64_t)tlb->RI0 << CP0EnLo_RI) |
 840                        ((uint64_t)tlb->XI0 << CP0EnLo_XI) | (tlb->C0 << 3) |
 841                        get_entrylo_pfn_from_tlb(tlb->PFN[0] >> 12);
 842        env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
 843                        ((uint64_t)tlb->RI1 << CP0EnLo_RI) |
 844                        ((uint64_t)tlb->XI1 << CP0EnLo_XI) | (tlb->C1 << 3) |
 845                        get_entrylo_pfn_from_tlb(tlb->PFN[1] >> 12);
 846    }
 847}
 848
 849void helper_tlbwi(CPUMIPSState *env)
 850{
 851    env->tlb->helper_tlbwi(env);
 852}
 853
 854void helper_tlbwr(CPUMIPSState *env)
 855{
 856    env->tlb->helper_tlbwr(env);
 857}
 858
 859void helper_tlbp(CPUMIPSState *env)
 860{
 861    env->tlb->helper_tlbp(env);
 862}
 863
 864void helper_tlbr(CPUMIPSState *env)
 865{
 866    env->tlb->helper_tlbr(env);
 867}
 868
 869void helper_tlbinv(CPUMIPSState *env)
 870{
 871    env->tlb->helper_tlbinv(env);
 872}
 873
 874void helper_tlbinvf(CPUMIPSState *env)
 875{
 876    env->tlb->helper_tlbinvf(env);
 877}
 878
 879static void global_invalidate_tlb(CPUMIPSState *env,
 880                           uint32_t invMsgVPN2,
 881                           uint8_t invMsgR,
 882                           uint32_t invMsgMMid,
 883                           bool invAll,
 884                           bool invVAMMid,
 885                           bool invMMid,
 886                           bool invVA)
 887{
 888
 889    int idx;
 890    r4k_tlb_t *tlb;
 891    bool VAMatch;
 892    bool MMidMatch;
 893
 894    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
 895        tlb = &env->tlb->mmu.r4k.tlb[idx];
 896        VAMatch =
 897            (((tlb->VPN & ~tlb->PageMask) == (invMsgVPN2 & ~tlb->PageMask))
 898#ifdef TARGET_MIPS64
 899            &&
 900            (extract64(env->CP0_EntryHi, 62, 2) == invMsgR)
 901#endif
 902            );
 903        MMidMatch = tlb->MMID == invMsgMMid;
 904        if ((invAll && (idx > env->CP0_Wired)) ||
 905            (VAMatch && invVAMMid && (tlb->G || MMidMatch)) ||
 906            (VAMatch && invVA) ||
 907            (MMidMatch && !(tlb->G) && invMMid)) {
 908            tlb->EHINV = 1;
 909        }
 910    }
 911    cpu_mips_tlb_flush(env);
 912}
 913
 914void helper_ginvt(CPUMIPSState *env, target_ulong arg, uint32_t type)
 915{
 916    bool invAll = type == 0;
 917    bool invVA = type == 1;
 918    bool invMMid = type == 2;
 919    bool invVAMMid = type == 3;
 920    uint32_t invMsgVPN2 = arg & (TARGET_PAGE_MASK << 1);
 921    uint8_t invMsgR = 0;
 922    uint32_t invMsgMMid = env->CP0_MemoryMapID;
 923    CPUState *other_cs = first_cpu;
 924
 925#ifdef TARGET_MIPS64
 926    invMsgR = extract64(arg, 62, 2);
 927#endif
 928
 929    CPU_FOREACH(other_cs) {
 930        MIPSCPU *other_cpu = MIPS_CPU(other_cs);
 931        global_invalidate_tlb(&other_cpu->env, invMsgVPN2, invMsgR, invMsgMMid,
 932                              invAll, invVAMMid, invMMid, invVA);
 933    }
 934}
 935
 936/* Specials */
 937target_ulong helper_di(CPUMIPSState *env)
 938{
 939    target_ulong t0 = env->CP0_Status;
 940
 941    env->CP0_Status = t0 & ~(1 << CP0St_IE);
 942    return t0;
 943}
 944
 945target_ulong helper_ei(CPUMIPSState *env)
 946{
 947    target_ulong t0 = env->CP0_Status;
 948
 949    env->CP0_Status = t0 | (1 << CP0St_IE);
 950    return t0;
 951}
 952
 953static void debug_pre_eret(CPUMIPSState *env)
 954{
 955    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
 956        qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
 957                env->active_tc.PC, env->CP0_EPC);
 958        if (env->CP0_Status & (1 << CP0St_ERL)) {
 959            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
 960        }
 961        if (env->hflags & MIPS_HFLAG_DM) {
 962            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
 963        }
 964        qemu_log("\n");
 965    }
 966}
 967
 968static void debug_post_eret(CPUMIPSState *env)
 969{
 970    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
 971        qemu_log("  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
 972                env->active_tc.PC, env->CP0_EPC);
 973        if (env->CP0_Status & (1 << CP0St_ERL)) {
 974            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
 975        }
 976        if (env->hflags & MIPS_HFLAG_DM) {
 977            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
 978        }
 979        switch (cpu_mmu_index(env, false)) {
 980        case 3:
 981            qemu_log(", ERL\n");
 982            break;
 983        case MIPS_HFLAG_UM:
 984            qemu_log(", UM\n");
 985            break;
 986        case MIPS_HFLAG_SM:
 987            qemu_log(", SM\n");
 988            break;
 989        case MIPS_HFLAG_KM:
 990            qemu_log("\n");
 991            break;
 992        default:
 993            cpu_abort(env_cpu(env), "Invalid MMU mode!\n");
 994            break;
 995        }
 996    }
 997}
 998
 999static void set_pc(CPUMIPSState *env, target_ulong error_pc)
1000{
1001    env->active_tc.PC = error_pc & ~(target_ulong)1;
1002    if (error_pc & 1) {
1003        env->hflags |= MIPS_HFLAG_M16;
1004    } else {
1005        env->hflags &= ~(MIPS_HFLAG_M16);
1006    }
1007}
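/*
 * Added descriptive note: bit 0 of the restored PC selects the compressed
 * (MIPS16/microMIPS) instruction set via MIPS_HFLAG_M16; the PC itself is
 * stored with that bit cleared.
 */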
1008
1009static inline void exception_return(CPUMIPSState *env)
1010{
1011    debug_pre_eret(env);
1012    if (env->CP0_Status & (1 << CP0St_ERL)) {
1013        set_pc(env, env->CP0_ErrorEPC);
1014        env->CP0_Status &= ~(1 << CP0St_ERL);
1015    } else {
1016        set_pc(env, env->CP0_EPC);
1017        env->CP0_Status &= ~(1 << CP0St_EXL);
1018    }
1019    compute_hflags(env);
1020    debug_post_eret(env);
1021}
1022
1023void helper_eret(CPUMIPSState *env)
1024{
1025    exception_return(env);
1026    env->CP0_LLAddr = 1;
1027    env->lladdr = 1;
1028}
1029
1030void helper_eretnc(CPUMIPSState *env)
1031{
1032    exception_return(env);
1033}
1034
1035void helper_deret(CPUMIPSState *env)
1036{
1037    debug_pre_eret(env);
1038
1039    env->hflags &= ~MIPS_HFLAG_DM;
1040    compute_hflags(env);
1041
1042    set_pc(env, env->CP0_DEPC);
1043
1044    debug_post_eret(env);
1045}
1046#endif /* !CONFIG_USER_ONLY */
1047
1048static inline void check_hwrena(CPUMIPSState *env, int reg, uintptr_t pc)
1049{
1050    if ((env->hflags & MIPS_HFLAG_CP0) || (env->CP0_HWREna & (1 << reg))) {
1051        return;
1052    }
1053    do_raise_exception(env, EXCP_RI, pc);
1054}
1055
1056target_ulong helper_rdhwr_cpunum(CPUMIPSState *env)
1057{
1058    check_hwrena(env, 0, GETPC());
1059    return env->CP0_EBase & 0x3ff;
1060}
1061
1062target_ulong helper_rdhwr_synci_step(CPUMIPSState *env)
1063{
1064    check_hwrena(env, 1, GETPC());
1065    return env->SYNCI_Step;
1066}
1067
1068target_ulong helper_rdhwr_cc(CPUMIPSState *env)
1069{
1070    check_hwrena(env, 2, GETPC());
1071#ifdef CONFIG_USER_ONLY
1072    return env->CP0_Count;
1073#else
1074    return (int32_t)cpu_mips_get_count(env);
1075#endif
1076}
1077
1078target_ulong helper_rdhwr_ccres(CPUMIPSState *env)
1079{
1080    check_hwrena(env, 3, GETPC());
1081    return env->CCRes;
1082}
1083
1084target_ulong helper_rdhwr_performance(CPUMIPSState *env)
1085{
1086    check_hwrena(env, 4, GETPC());
1087    return env->CP0_Performance0;
1088}
1089
1090target_ulong helper_rdhwr_xnp(CPUMIPSState *env)
1091{
1092    check_hwrena(env, 5, GETPC());
1093    return (env->CP0_Config5 >> CP0C5_XNP) & 1;
1094}
1095
1096void helper_pmon(CPUMIPSState *env, int function)
1097{
1098    function /= 2;
1099    switch (function) {
1100    case 2: /* TODO: char inbyte(int waitflag); */
1101        if (env->active_tc.gpr[4] == 0) {
1102            env->active_tc.gpr[2] = -1;
1103        }
1104        /* Fall through */
1105    case 11: /* TODO: char inbyte (void); */
1106        env->active_tc.gpr[2] = -1;
1107        break;
1108    case 3:
1109    case 12:
1110        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
1111        break;
1112    case 17:
1113        break;
1114    case 158:
1115        {
1116            unsigned char *fmt = (void *)(uintptr_t)env->active_tc.gpr[4];
1117            printf("%s", fmt);
1118        }
1119        break;
1120    }
1121}
1122
1123void helper_wait(CPUMIPSState *env)
1124{
1125    CPUState *cs = env_cpu(env);
1126
1127    cs->halted = 1;
1128    cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
1129    /*
1130     * This is the last instruction in the block; the PC was already
1131     * updated, so there is no need to recover PC and icount.
1132     */
1133    raise_exception(env, EXCP_HLT);
1134}
1135
1136#if !defined(CONFIG_USER_ONLY)
1137
1138void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
1139                                  MMUAccessType access_type,
1140                                  int mmu_idx, uintptr_t retaddr)
1141{
1142    MIPSCPU *cpu = MIPS_CPU(cs);
1143    CPUMIPSState *env = &cpu->env;
1144    int error_code = 0;
1145    int excp;
1146
1147    if (!(env->hflags & MIPS_HFLAG_DM)) {
1148        env->CP0_BadVAddr = addr;
1149    }
1150
1151    if (access_type == MMU_DATA_STORE) {
1152        excp = EXCP_AdES;
1153    } else {
1154        excp = EXCP_AdEL;
1155        if (access_type == MMU_INST_FETCH) {
1156            error_code |= EXCP_INST_NOTAVAIL;
1157        }
1158    }
1159
1160    do_raise_exception_err(env, excp, error_code, retaddr);
1161}
1162
1163void mips_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
1164                                    vaddr addr, unsigned size,
1165                                    MMUAccessType access_type,
1166                                    int mmu_idx, MemTxAttrs attrs,
1167                                    MemTxResult response, uintptr_t retaddr)
1168{
1169    MIPSCPU *cpu = MIPS_CPU(cs);
1170    CPUMIPSState *env = &cpu->env;
1171
1172    if (access_type == MMU_INST_FETCH) {
1173        do_raise_exception(env, EXCP_IBE, retaddr);
1174    } else {
1175        do_raise_exception(env, EXCP_DBE, retaddr);
1176    }
1177}
1178#endif /* !CONFIG_USER_ONLY */
1179
1180
1181/* MSA */
1182/* Data format element widths */
1183#define DF_BITS(df) (1 << ((df) + 3))
1184
1185/* Element-by-element access macros */
1186#define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))
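/*
 * Added illustrative note (assuming DF_BYTE..DF_DOUBLE enumerate 0..3, as
 * the address shifts in the helpers below rely on): DF_BITS gives the
 * element width of 8/16/32/64 bits and DF_ELEMENTS the corresponding
 * 16/8/4/2 elements of a 128-bit (MSA_WRLEN) vector register.
 */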
1187
1188#if !defined(CONFIG_USER_ONLY)
1189#define MEMOP_IDX(DF)                                           \
1190        TCGMemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN,  \
1191                                        cpu_mmu_index(env, false));
1192#else
1193#define MEMOP_IDX(DF)
1194#endif
1195
1196void helper_msa_ld_b(CPUMIPSState *env, uint32_t wd,
1197                     target_ulong addr)
1198{
1199    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1200    MEMOP_IDX(DF_BYTE)
1201#if !defined(CONFIG_USER_ONLY)
1202#if !defined(HOST_WORDS_BIGENDIAN)
1203    pwd->b[0]  = helper_ret_ldub_mmu(env, addr + (0  << DF_BYTE), oi, GETPC());
1204    pwd->b[1]  = helper_ret_ldub_mmu(env, addr + (1  << DF_BYTE), oi, GETPC());
1205    pwd->b[2]  = helper_ret_ldub_mmu(env, addr + (2  << DF_BYTE), oi, GETPC());
1206    pwd->b[3]  = helper_ret_ldub_mmu(env, addr + (3  << DF_BYTE), oi, GETPC());
1207    pwd->b[4]  = helper_ret_ldub_mmu(env, addr + (4  << DF_BYTE), oi, GETPC());
1208    pwd->b[5]  = helper_ret_ldub_mmu(env, addr + (5  << DF_BYTE), oi, GETPC());
1209    pwd->b[6]  = helper_ret_ldub_mmu(env, addr + (6  << DF_BYTE), oi, GETPC());
1210    pwd->b[7]  = helper_ret_ldub_mmu(env, addr + (7  << DF_BYTE), oi, GETPC());
1211    pwd->b[8]  = helper_ret_ldub_mmu(env, addr + (8  << DF_BYTE), oi, GETPC());
1212    pwd->b[9]  = helper_ret_ldub_mmu(env, addr + (9  << DF_BYTE), oi, GETPC());
1213    pwd->b[10] = helper_ret_ldub_mmu(env, addr + (10 << DF_BYTE), oi, GETPC());
1214    pwd->b[11] = helper_ret_ldub_mmu(env, addr + (11 << DF_BYTE), oi, GETPC());
1215    pwd->b[12] = helper_ret_ldub_mmu(env, addr + (12 << DF_BYTE), oi, GETPC());
1216    pwd->b[13] = helper_ret_ldub_mmu(env, addr + (13 << DF_BYTE), oi, GETPC());
1217    pwd->b[14] = helper_ret_ldub_mmu(env, addr + (14 << DF_BYTE), oi, GETPC());
1218    pwd->b[15] = helper_ret_ldub_mmu(env, addr + (15 << DF_BYTE), oi, GETPC());
1219#else
1220    pwd->b[0]  = helper_ret_ldub_mmu(env, addr + (7  << DF_BYTE), oi, GETPC());
1221    pwd->b[1]  = helper_ret_ldub_mmu(env, addr + (6  << DF_BYTE), oi, GETPC());
1222    pwd->b[2]  = helper_ret_ldub_mmu(env, addr + (5  << DF_BYTE), oi, GETPC());
1223    pwd->b[3]  = helper_ret_ldub_mmu(env, addr + (4  << DF_BYTE), oi, GETPC());
1224    pwd->b[4]  = helper_ret_ldub_mmu(env, addr + (3  << DF_BYTE), oi, GETPC());
1225    pwd->b[5]  = helper_ret_ldub_mmu(env, addr + (2  << DF_BYTE), oi, GETPC());
1226    pwd->b[6]  = helper_ret_ldub_mmu(env, addr + (1  << DF_BYTE), oi, GETPC());
1227    pwd->b[7]  = helper_ret_ldub_mmu(env, addr + (0  << DF_BYTE), oi, GETPC());
1228    pwd->b[8]  = helper_ret_ldub_mmu(env, addr + (15 << DF_BYTE), oi, GETPC());
1229    pwd->b[9]  = helper_ret_ldub_mmu(env, addr + (14 << DF_BYTE), oi, GETPC());
1230    pwd->b[10] = helper_ret_ldub_mmu(env, addr + (13 << DF_BYTE), oi, GETPC());
1231    pwd->b[11] = helper_ret_ldub_mmu(env, addr + (12 << DF_BYTE), oi, GETPC());
1232    pwd->b[12] = helper_ret_ldub_mmu(env, addr + (11 << DF_BYTE), oi, GETPC());
1233    pwd->b[13] = helper_ret_ldub_mmu(env, addr + (10 << DF_BYTE), oi, GETPC());
1234    pwd->b[14] = helper_ret_ldub_mmu(env, addr + (9  << DF_BYTE), oi, GETPC());
1235    pwd->b[15] = helper_ret_ldub_mmu(env, addr + (8  << DF_BYTE), oi, GETPC());
1236#endif
1237#else
1238#if !defined(HOST_WORDS_BIGENDIAN)
1239    pwd->b[0]  = cpu_ldub_data(env, addr + (0  << DF_BYTE));
1240    pwd->b[1]  = cpu_ldub_data(env, addr + (1  << DF_BYTE));
1241    pwd->b[2]  = cpu_ldub_data(env, addr + (2  << DF_BYTE));
1242    pwd->b[3]  = cpu_ldub_data(env, addr + (3  << DF_BYTE));
1243    pwd->b[4]  = cpu_ldub_data(env, addr + (4  << DF_BYTE));
1244    pwd->b[5]  = cpu_ldub_data(env, addr + (5  << DF_BYTE));
1245    pwd->b[6]  = cpu_ldub_data(env, addr + (6  << DF_BYTE));
1246    pwd->b[7]  = cpu_ldub_data(env, addr + (7  << DF_BYTE));
1247    pwd->b[8]  = cpu_ldub_data(env, addr + (8  << DF_BYTE));
1248    pwd->b[9]  = cpu_ldub_data(env, addr + (9  << DF_BYTE));
1249    pwd->b[10] = cpu_ldub_data(env, addr + (10 << DF_BYTE));
1250    pwd->b[11] = cpu_ldub_data(env, addr + (11 << DF_BYTE));
1251    pwd->b[12] = cpu_ldub_data(env, addr + (12 << DF_BYTE));
1252    pwd->b[13] = cpu_ldub_data(env, addr + (13 << DF_BYTE));
1253    pwd->b[14] = cpu_ldub_data(env, addr + (14 << DF_BYTE));
1254    pwd->b[15] = cpu_ldub_data(env, addr + (15 << DF_BYTE));
1255#else
1256    pwd->b[0]  = cpu_ldub_data(env, addr + (7  << DF_BYTE));
1257    pwd->b[1]  = cpu_ldub_data(env, addr + (6  << DF_BYTE));
1258    pwd->b[2]  = cpu_ldub_data(env, addr + (5  << DF_BYTE));
1259    pwd->b[3]  = cpu_ldub_data(env, addr + (4  << DF_BYTE));
1260    pwd->b[4]  = cpu_ldub_data(env, addr + (3  << DF_BYTE));
1261    pwd->b[5]  = cpu_ldub_data(env, addr + (2  << DF_BYTE));
1262    pwd->b[6]  = cpu_ldub_data(env, addr + (1  << DF_BYTE));
1263    pwd->b[7]  = cpu_ldub_data(env, addr + (0  << DF_BYTE));
1264    pwd->b[8]  = cpu_ldub_data(env, addr + (15 << DF_BYTE));
1265    pwd->b[9]  = cpu_ldub_data(env, addr + (14 << DF_BYTE));
1266    pwd->b[10] = cpu_ldub_data(env, addr + (13 << DF_BYTE));
1267    pwd->b[11] = cpu_ldub_data(env, addr + (12 << DF_BYTE));
1268    pwd->b[12] = cpu_ldub_data(env, addr + (11 << DF_BYTE));
1269    pwd->b[13] = cpu_ldub_data(env, addr + (10 << DF_BYTE));
1270    pwd->b[14] = cpu_ldub_data(env, addr + (9  << DF_BYTE));
1271    pwd->b[15] = cpu_ldub_data(env, addr + (8  << DF_BYTE));
1272#endif
1273#endif
1274}
1275
1276void helper_msa_ld_h(CPUMIPSState *env, uint32_t wd,
1277                     target_ulong addr)
1278{
1279    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1280    MEMOP_IDX(DF_HALF)
1281#if !defined(CONFIG_USER_ONLY)
1282#if !defined(HOST_WORDS_BIGENDIAN)
1283    pwd->h[0] = helper_ret_lduw_mmu(env, addr + (0 << DF_HALF), oi, GETPC());
1284    pwd->h[1] = helper_ret_lduw_mmu(env, addr + (1 << DF_HALF), oi, GETPC());
1285    pwd->h[2] = helper_ret_lduw_mmu(env, addr + (2 << DF_HALF), oi, GETPC());
1286    pwd->h[3] = helper_ret_lduw_mmu(env, addr + (3 << DF_HALF), oi, GETPC());
1287    pwd->h[4] = helper_ret_lduw_mmu(env, addr + (4 << DF_HALF), oi, GETPC());
1288    pwd->h[5] = helper_ret_lduw_mmu(env, addr + (5 << DF_HALF), oi, GETPC());
1289    pwd->h[6] = helper_ret_lduw_mmu(env, addr + (6 << DF_HALF), oi, GETPC());
1290    pwd->h[7] = helper_ret_lduw_mmu(env, addr + (7 << DF_HALF), oi, GETPC());
1291#else
1292    pwd->h[0] = helper_ret_lduw_mmu(env, addr + (3 << DF_HALF), oi, GETPC());
1293    pwd->h[1] = helper_ret_lduw_mmu(env, addr + (2 << DF_HALF), oi, GETPC());
1294    pwd->h[2] = helper_ret_lduw_mmu(env, addr + (1 << DF_HALF), oi, GETPC());
1295    pwd->h[3] = helper_ret_lduw_mmu(env, addr + (0 << DF_HALF), oi, GETPC());
1296    pwd->h[4] = helper_ret_lduw_mmu(env, addr + (7 << DF_HALF), oi, GETPC());
1297    pwd->h[5] = helper_ret_lduw_mmu(env, addr + (6 << DF_HALF), oi, GETPC());
1298    pwd->h[6] = helper_ret_lduw_mmu(env, addr + (5 << DF_HALF), oi, GETPC());
1299    pwd->h[7] = helper_ret_lduw_mmu(env, addr + (4 << DF_HALF), oi, GETPC());
1300#endif
1301#else
1302#if !defined(HOST_WORDS_BIGENDIAN)
1303    pwd->h[0] = cpu_lduw_data(env, addr + (0 << DF_HALF));
1304    pwd->h[1] = cpu_lduw_data(env, addr + (1 << DF_HALF));
1305    pwd->h[2] = cpu_lduw_data(env, addr + (2 << DF_HALF));
1306    pwd->h[3] = cpu_lduw_data(env, addr + (3 << DF_HALF));
1307    pwd->h[4] = cpu_lduw_data(env, addr + (4 << DF_HALF));
1308    pwd->h[5] = cpu_lduw_data(env, addr + (5 << DF_HALF));
1309    pwd->h[6] = cpu_lduw_data(env, addr + (6 << DF_HALF));
1310    pwd->h[7] = cpu_lduw_data(env, addr + (7 << DF_HALF));
1311#else
1312    pwd->h[0] = cpu_lduw_data(env, addr + (3 << DF_HALF));
1313    pwd->h[1] = cpu_lduw_data(env, addr + (2 << DF_HALF));
1314    pwd->h[2] = cpu_lduw_data(env, addr + (1 << DF_HALF));
1315    pwd->h[3] = cpu_lduw_data(env, addr + (0 << DF_HALF));
1316    pwd->h[4] = cpu_lduw_data(env, addr + (7 << DF_HALF));
1317    pwd->h[5] = cpu_lduw_data(env, addr + (6 << DF_HALF));
1318    pwd->h[6] = cpu_lduw_data(env, addr + (5 << DF_HALF));
1319    pwd->h[7] = cpu_lduw_data(env, addr + (4 << DF_HALF));
1320#endif
1321#endif
1322}
1323
1324void helper_msa_ld_w(CPUMIPSState *env, uint32_t wd,
1325                     target_ulong addr)
1326{
1327    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1328    MEMOP_IDX(DF_WORD)
1329#if !defined(CONFIG_USER_ONLY)
1330#if !defined(HOST_WORDS_BIGENDIAN)
1331    pwd->w[0] = helper_ret_ldul_mmu(env, addr + (0 << DF_WORD), oi, GETPC());
1332    pwd->w[1] = helper_ret_ldul_mmu(env, addr + (1 << DF_WORD), oi, GETPC());
1333    pwd->w[2] = helper_ret_ldul_mmu(env, addr + (2 << DF_WORD), oi, GETPC());
1334    pwd->w[3] = helper_ret_ldul_mmu(env, addr + (3 << DF_WORD), oi, GETPC());
1335#else
1336    pwd->w[0] = helper_ret_ldul_mmu(env, addr + (1 << DF_WORD), oi, GETPC());
1337    pwd->w[1] = helper_ret_ldul_mmu(env, addr + (0 << DF_WORD), oi, GETPC());
1338    pwd->w[2] = helper_ret_ldul_mmu(env, addr + (3 << DF_WORD), oi, GETPC());
1339    pwd->w[3] = helper_ret_ldul_mmu(env, addr + (2 << DF_WORD), oi, GETPC());
1340#endif
1341#else
1342#if !defined(HOST_WORDS_BIGENDIAN)
1343    pwd->w[0] = cpu_ldl_data(env, addr + (0 << DF_WORD));
1344    pwd->w[1] = cpu_ldl_data(env, addr + (1 << DF_WORD));
1345    pwd->w[2] = cpu_ldl_data(env, addr + (2 << DF_WORD));
1346    pwd->w[3] = cpu_ldl_data(env, addr + (3 << DF_WORD));
1347#else
1348    pwd->w[0] = cpu_ldl_data(env, addr + (1 << DF_WORD));
1349    pwd->w[1] = cpu_ldl_data(env, addr + (0 << DF_WORD));
1350    pwd->w[2] = cpu_ldl_data(env, addr + (3 << DF_WORD));
1351    pwd->w[3] = cpu_ldl_data(env, addr + (2 << DF_WORD));
1352#endif
1353#endif
1354}
1355
1356void helper_msa_ld_d(CPUMIPSState *env, uint32_t wd,
1357                     target_ulong addr)
1358{
1359    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1360    MEMOP_IDX(DF_DOUBLE)
1361#if !defined(CONFIG_USER_ONLY)
1362    pwd->d[0] = helper_ret_ldq_mmu(env, addr + (0 << DF_DOUBLE), oi, GETPC());
1363    pwd->d[1] = helper_ret_ldq_mmu(env, addr + (1 << DF_DOUBLE), oi, GETPC());
1364#else
1365    pwd->d[0] = cpu_ldq_data(env, addr + (0 << DF_DOUBLE));
1366    pwd->d[1] = cpu_ldq_data(env, addr + (1 << DF_DOUBLE));
1367#endif
1368}
1369
1370#define MSA_PAGESPAN(x) \
1371        ((((x) & ~TARGET_PAGE_MASK) + MSA_WRLEN / 8 - 1) >= TARGET_PAGE_SIZE)
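/*
 * Added descriptive note: MSA_PAGESPAN(x) is true when a full
 * MSA_WRLEN / 8 = 16 byte vector access starting at x would cross a page
 * boundary; ensure_writable_pages() then probes both pages before any
 * byte of a vector store is committed.
 */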
1372
1373static inline void ensure_writable_pages(CPUMIPSState *env,
1374                                         target_ulong addr,
1375                                         int mmu_idx,
1376                                         uintptr_t retaddr)
1377{
1378    /* FIXME: Probe the actual accesses (pass and use a size) */
1379    if (unlikely(MSA_PAGESPAN(addr))) {
1380        /* first page */
1381        probe_write(env, addr, 0, mmu_idx, retaddr);
1382        /* second page */
1383        addr = (addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1384        probe_write(env, addr, 0, mmu_idx, retaddr);
1385    }
1386}
1387
1388void helper_msa_st_b(CPUMIPSState *env, uint32_t wd,
1389                     target_ulong addr)
1390{
1391    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1392    int mmu_idx = cpu_mmu_index(env, false);
1393
1394    MEMOP_IDX(DF_BYTE)
1395    ensure_writable_pages(env, addr, mmu_idx, GETPC());
1396#if !defined(CONFIG_USER_ONLY)
1397#if !defined(HOST_WORDS_BIGENDIAN)
1398    helper_ret_stb_mmu(env, addr + (0  << DF_BYTE), pwd->b[0],  oi, GETPC());
1399    helper_ret_stb_mmu(env, addr + (1  << DF_BYTE), pwd->b[1],  oi, GETPC());
1400    helper_ret_stb_mmu(env, addr + (2  << DF_BYTE), pwd->b[2],  oi, GETPC());
1401    helper_ret_stb_mmu(env, addr + (3  << DF_BYTE), pwd->b[3],  oi, GETPC());
1402    helper_ret_stb_mmu(env, addr + (4  << DF_BYTE), pwd->b[4],  oi, GETPC());
1403    helper_ret_stb_mmu(env, addr + (5  << DF_BYTE), pwd->b[5],  oi, GETPC());
1404    helper_ret_stb_mmu(env, addr + (6  << DF_BYTE), pwd->b[6],  oi, GETPC());
1405    helper_ret_stb_mmu(env, addr + (7  << DF_BYTE), pwd->b[7],  oi, GETPC());
1406    helper_ret_stb_mmu(env, addr + (8  << DF_BYTE), pwd->b[8],  oi, GETPC());
1407    helper_ret_stb_mmu(env, addr + (9  << DF_BYTE), pwd->b[9],  oi, GETPC());
1408    helper_ret_stb_mmu(env, addr + (10 << DF_BYTE), pwd->b[10], oi, GETPC());
1409    helper_ret_stb_mmu(env, addr + (11 << DF_BYTE), pwd->b[11], oi, GETPC());
1410    helper_ret_stb_mmu(env, addr + (12 << DF_BYTE), pwd->b[12], oi, GETPC());
1411    helper_ret_stb_mmu(env, addr + (13 << DF_BYTE), pwd->b[13], oi, GETPC());
1412    helper_ret_stb_mmu(env, addr + (14 << DF_BYTE), pwd->b[14], oi, GETPC());
1413    helper_ret_stb_mmu(env, addr + (15 << DF_BYTE), pwd->b[15], oi, GETPC());
1414#else
1415    helper_ret_stb_mmu(env, addr + (7  << DF_BYTE), pwd->b[0],  oi, GETPC());
1416    helper_ret_stb_mmu(env, addr + (6  << DF_BYTE), pwd->b[1],  oi, GETPC());
1417    helper_ret_stb_mmu(env, addr + (5  << DF_BYTE), pwd->b[2],  oi, GETPC());
1418    helper_ret_stb_mmu(env, addr + (4  << DF_BYTE), pwd->b[3],  oi, GETPC());
1419    helper_ret_stb_mmu(env, addr + (3  << DF_BYTE), pwd->b[4],  oi, GETPC());
1420    helper_ret_stb_mmu(env, addr + (2  << DF_BYTE), pwd->b[5],  oi, GETPC());
1421    helper_ret_stb_mmu(env, addr + (1  << DF_BYTE), pwd->b[6],  oi, GETPC());
1422    helper_ret_stb_mmu(env, addr + (0  << DF_BYTE), pwd->b[7],  oi, GETPC());
1423    helper_ret_stb_mmu(env, addr + (15 << DF_BYTE), pwd->b[8],  oi, GETPC());
1424    helper_ret_stb_mmu(env, addr + (14 << DF_BYTE), pwd->b[9],  oi, GETPC());
1425    helper_ret_stb_mmu(env, addr + (13 << DF_BYTE), pwd->b[10], oi, GETPC());
1426    helper_ret_stb_mmu(env, addr + (12 << DF_BYTE), pwd->b[11], oi, GETPC());
1427    helper_ret_stb_mmu(env, addr + (11 << DF_BYTE), pwd->b[12], oi, GETPC());
1428    helper_ret_stb_mmu(env, addr + (10 << DF_BYTE), pwd->b[13], oi, GETPC());
1429    helper_ret_stb_mmu(env, addr + (9  << DF_BYTE), pwd->b[14], oi, GETPC());
1430    helper_ret_stb_mmu(env, addr + (8  << DF_BYTE), pwd->b[15], oi, GETPC());
1431#endif
1432#else
1433#if !defined(HOST_WORDS_BIGENDIAN)
1434    cpu_stb_data(env, addr + (0  << DF_BYTE), pwd->b[0]);
1435    cpu_stb_data(env, addr + (1  << DF_BYTE), pwd->b[1]);
1436    cpu_stb_data(env, addr + (2  << DF_BYTE), pwd->b[2]);
1437    cpu_stb_data(env, addr + (3  << DF_BYTE), pwd->b[3]);
1438    cpu_stb_data(env, addr + (4  << DF_BYTE), pwd->b[4]);
1439    cpu_stb_data(env, addr + (5  << DF_BYTE), pwd->b[5]);
1440    cpu_stb_data(env, addr + (6  << DF_BYTE), pwd->b[6]);
1441    cpu_stb_data(env, addr + (7  << DF_BYTE), pwd->b[7]);
1442    cpu_stb_data(env, addr + (8  << DF_BYTE), pwd->b[8]);
1443    cpu_stb_data(env, addr + (9  << DF_BYTE), pwd->b[9]);
1444    cpu_stb_data(env, addr + (10 << DF_BYTE), pwd->b[10]);
1445    cpu_stb_data(env, addr + (11 << DF_BYTE), pwd->b[11]);
1446    cpu_stb_data(env, addr + (12 << DF_BYTE), pwd->b[12]);
1447    cpu_stb_data(env, addr + (13 << DF_BYTE), pwd->b[13]);
1448    cpu_stb_data(env, addr + (14 << DF_BYTE), pwd->b[14]);
1449    cpu_stb_data(env, addr + (15 << DF_BYTE), pwd->b[15]);
1450#else
1451    cpu_stb_data(env, addr + (7  << DF_BYTE), pwd->b[0]);
1452    cpu_stb_data(env, addr + (6  << DF_BYTE), pwd->b[1]);
1453    cpu_stb_data(env, addr + (5  << DF_BYTE), pwd->b[2]);
1454    cpu_stb_data(env, addr + (4  << DF_BYTE), pwd->b[3]);
1455    cpu_stb_data(env, addr + (3  << DF_BYTE), pwd->b[4]);
1456    cpu_stb_data(env, addr + (2  << DF_BYTE), pwd->b[5]);
1457    cpu_stb_data(env, addr + (1  << DF_BYTE), pwd->b[6]);
1458    cpu_stb_data(env, addr + (0  << DF_BYTE), pwd->b[7]);
1459    cpu_stb_data(env, addr + (15 << DF_BYTE), pwd->b[8]);
1460    cpu_stb_data(env, addr + (14 << DF_BYTE), pwd->b[9]);
1461    cpu_stb_data(env, addr + (13 << DF_BYTE), pwd->b[10]);
1462    cpu_stb_data(env, addr + (12 << DF_BYTE), pwd->b[11]);
1463    cpu_stb_data(env, addr + (11 << DF_BYTE), pwd->b[12]);
1464    cpu_stb_data(env, addr + (10 << DF_BYTE), pwd->b[13]);
1465    cpu_stb_data(env, addr + (9  << DF_BYTE), pwd->b[14]);
1466    cpu_stb_data(env, addr + (8  << DF_BYTE), pwd->b[15]);
1467#endif
1468#endif
1469}
1470
1471void helper_msa_st_h(CPUMIPSState *env, uint32_t wd,
1472                     target_ulong addr)
1473{
1474    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1475    int mmu_idx = cpu_mmu_index(env, false);
1476
1477    MEMOP_IDX(DF_HALF)
1478    ensure_writable_pages(env, addr, mmu_idx, GETPC());
1479#if !defined(CONFIG_USER_ONLY)
1480#if !defined(HOST_WORDS_BIGENDIAN)
1481    helper_ret_stw_mmu(env, addr + (0 << DF_HALF), pwd->h[0], oi, GETPC());
1482    helper_ret_stw_mmu(env, addr + (1 << DF_HALF), pwd->h[1], oi, GETPC());
1483    helper_ret_stw_mmu(env, addr + (2 << DF_HALF), pwd->h[2], oi, GETPC());
1484    helper_ret_stw_mmu(env, addr + (3 << DF_HALF), pwd->h[3], oi, GETPC());
1485    helper_ret_stw_mmu(env, addr + (4 << DF_HALF), pwd->h[4], oi, GETPC());
1486    helper_ret_stw_mmu(env, addr + (5 << DF_HALF), pwd->h[5], oi, GETPC());
1487    helper_ret_stw_mmu(env, addr + (6 << DF_HALF), pwd->h[6], oi, GETPC());
1488    helper_ret_stw_mmu(env, addr + (7 << DF_HALF), pwd->h[7], oi, GETPC());
1489#else
1490    helper_ret_stw_mmu(env, addr + (3 << DF_HALF), pwd->h[0], oi, GETPC());
1491    helper_ret_stw_mmu(env, addr + (2 << DF_HALF), pwd->h[1], oi, GETPC());
1492    helper_ret_stw_mmu(env, addr + (1 << DF_HALF), pwd->h[2], oi, GETPC());
1493    helper_ret_stw_mmu(env, addr + (0 << DF_HALF), pwd->h[3], oi, GETPC());
1494    helper_ret_stw_mmu(env, addr + (7 << DF_HALF), pwd->h[4], oi, GETPC());
1495    helper_ret_stw_mmu(env, addr + (6 << DF_HALF), pwd->h[5], oi, GETPC());
1496    helper_ret_stw_mmu(env, addr + (5 << DF_HALF), pwd->h[6], oi, GETPC());
1497    helper_ret_stw_mmu(env, addr + (4 << DF_HALF), pwd->h[7], oi, GETPC());
1498#endif
1499#else
1500#if !defined(HOST_WORDS_BIGENDIAN)
1501    cpu_stw_data(env, addr + (0 << DF_HALF), pwd->h[0]);
1502    cpu_stw_data(env, addr + (1 << DF_HALF), pwd->h[1]);
1503    cpu_stw_data(env, addr + (2 << DF_HALF), pwd->h[2]);
1504    cpu_stw_data(env, addr + (3 << DF_HALF), pwd->h[3]);
1505    cpu_stw_data(env, addr + (4 << DF_HALF), pwd->h[4]);
1506    cpu_stw_data(env, addr + (5 << DF_HALF), pwd->h[5]);
1507    cpu_stw_data(env, addr + (6 << DF_HALF), pwd->h[6]);
1508    cpu_stw_data(env, addr + (7 << DF_HALF), pwd->h[7]);
1509#else
1510    cpu_stw_data(env, addr + (3 << DF_HALF), pwd->h[0]);
1511    cpu_stw_data(env, addr + (2 << DF_HALF), pwd->h[1]);
1512    cpu_stw_data(env, addr + (1 << DF_HALF), pwd->h[2]);
1513    cpu_stw_data(env, addr + (0 << DF_HALF), pwd->h[3]);
1514    cpu_stw_data(env, addr + (7 << DF_HALF), pwd->h[4]);
1515    cpu_stw_data(env, addr + (6 << DF_HALF), pwd->h[5]);
1516    cpu_stw_data(env, addr + (5 << DF_HALF), pwd->h[6]);
1517    cpu_stw_data(env, addr + (4 << DF_HALF), pwd->h[7]);
1518#endif
1519#endif
1520}
1521
1522void helper_msa_st_w(CPUMIPSState *env, uint32_t wd,
1523                     target_ulong addr)
1524{
1525    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1526    int mmu_idx = cpu_mmu_index(env, false);
1527
1528    MEMOP_IDX(DF_WORD)
1529    ensure_writable_pages(env, addr, mmu_idx, GETPC());
1530#if !defined(CONFIG_USER_ONLY)
1531#if !defined(HOST_WORDS_BIGENDIAN)
1532    helper_ret_stl_mmu(env, addr + (0 << DF_WORD), pwd->w[0], oi, GETPC());
1533    helper_ret_stl_mmu(env, addr + (1 << DF_WORD), pwd->w[1], oi, GETPC());
1534    helper_ret_stl_mmu(env, addr + (2 << DF_WORD), pwd->w[2], oi, GETPC());
1535    helper_ret_stl_mmu(env, addr + (3 << DF_WORD), pwd->w[3], oi, GETPC());
1536#else
1537    helper_ret_stl_mmu(env, addr + (1 << DF_WORD), pwd->w[0], oi, GETPC());
1538    helper_ret_stl_mmu(env, addr + (0 << DF_WORD), pwd->w[1], oi, GETPC());
1539    helper_ret_stl_mmu(env, addr + (3 << DF_WORD), pwd->w[2], oi, GETPC());
1540    helper_ret_stl_mmu(env, addr + (2 << DF_WORD), pwd->w[3], oi, GETPC());
1541#endif
1542#else
1543#if !defined(HOST_WORDS_BIGENDIAN)
1544    cpu_stl_data(env, addr + (0 << DF_WORD), pwd->w[0]);
1545    cpu_stl_data(env, addr + (1 << DF_WORD), pwd->w[1]);
1546    cpu_stl_data(env, addr + (2 << DF_WORD), pwd->w[2]);
1547    cpu_stl_data(env, addr + (3 << DF_WORD), pwd->w[3]);
1548#else
1549    cpu_stl_data(env, addr + (1 << DF_WORD), pwd->w[0]);
1550    cpu_stl_data(env, addr + (0 << DF_WORD), pwd->w[1]);
1551    cpu_stl_data(env, addr + (3 << DF_WORD), pwd->w[2]);
1552    cpu_stl_data(env, addr + (2 << DF_WORD), pwd->w[3]);
1553#endif
1554#endif
1555}
1556
1557void helper_msa_st_d(CPUMIPSState *env, uint32_t wd,
1558                     target_ulong addr)
1559{
1560    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1561    int mmu_idx = cpu_mmu_index(env, false);
1562
1563    MEMOP_IDX(DF_DOUBLE)
1564    ensure_writable_pages(env, addr, mmu_idx, GETPC());
1565#if !defined(CONFIG_USER_ONLY)
1566    helper_ret_stq_mmu(env, addr + (0 << DF_DOUBLE), pwd->d[0], oi, GETPC());
1567    helper_ret_stq_mmu(env, addr + (1 << DF_DOUBLE), pwd->d[1], oi, GETPC());
1568#else
1569    cpu_stq_data(env, addr + (0 << DF_DOUBLE), pwd->d[0]);
1570    cpu_stq_data(env, addr + (1 << DF_DOUBLE), pwd->d[1]);
1571#endif
1572}
1573
1574void helper_cache(CPUMIPSState *env, target_ulong addr, uint32_t op)
1575{
1576#ifndef CONFIG_USER_ONLY
1577    target_ulong index = addr & 0x1fffffff;
1578    if (op == 9) {
1579        /* Index Store Tag */
1580        memory_region_dispatch_write(env->itc_tag, index, env->CP0_TagLo,
1581                                     MO_64, MEMTXATTRS_UNSPECIFIED);
1582    } else if (op == 5) {
1583        /* Index Load Tag */
1584        memory_region_dispatch_read(env->itc_tag, index, &env->CP0_TagLo,
1585                                    MO_64, MEMTXATTRS_UNSPECIFIED);
1586    }
1587#endif
1588}
1589