qemu/target/mips/cp0_helper.c
   1/*
   2 *  Helpers for emulation of CP0-related MIPS instructions.
   3 *
   4 *  Copyright (C) 2004-2005  Jocelyn Mayer
   5 *  Copyright (C) 2020  Wave Computing, Inc.
   6 *  Copyright (C) 2020  Aleksandar Markovic <amarkovic@wavecomp.com>
   7 *
   8 * This library is free software; you can redistribute it and/or
   9 * modify it under the terms of the GNU Lesser General Public
  10 * License as published by the Free Software Foundation; either
  11 * version 2 of the License, or (at your option) any later version.
  12 *
  13 * This library is distributed in the hope that it will be useful,
  14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  16 * Lesser General Public License for more details.
  17 *
  18 * You should have received a copy of the GNU Lesser General Public
  19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  20 *
  21 */
  22
  23#include "qemu/osdep.h"
  24#include "qemu/main-loop.h"
  25#include "cpu.h"
  26#include "internal.h"
  27#include "qemu/host-utils.h"
  28#include "exec/helper-proto.h"
  29#include "exec/exec-all.h"
  30#include "exec/cpu_ldst.h"
  31#include "exec/memop.h"
  32#include "sysemu/kvm.h"
  33
  34
  35#ifndef CONFIG_USER_ONLY
  36/* SMP helpers.  */
  37static bool mips_vpe_is_wfi(MIPSCPU *c)
  38{
  39    CPUState *cpu = CPU(c);
  40    CPUMIPSState *env = &c->env;
  41
  42    /*
  43     * If the VPE is halted but otherwise active, it means it's waiting for
   44     * an interrupt.
  45     */
  46    return cpu->halted && mips_vpe_active(env);
  47}
  48
  49static bool mips_vp_is_wfi(MIPSCPU *c)
  50{
  51    CPUState *cpu = CPU(c);
  52    CPUMIPSState *env = &c->env;
  53
  54    return cpu->halted && mips_vp_active(env);
  55}
  56
  57static inline void mips_vpe_wake(MIPSCPU *c)
  58{
  59    /*
  60     * Don't set ->halted = 0 directly, let it be done via cpu_has_work
  61     * because there might be other conditions that state that c should
  62     * be sleeping.
  63     */
  64    qemu_mutex_lock_iothread();
  65    cpu_interrupt(CPU(c), CPU_INTERRUPT_WAKE);
  66    qemu_mutex_unlock_iothread();
  67}
  68
  69static inline void mips_vpe_sleep(MIPSCPU *cpu)
  70{
  71    CPUState *cs = CPU(cpu);
  72
  73    /*
  74     * The VPE was shut off, really go to bed.
  75     * Reset any old _WAKE requests.
  76     */
  77    cs->halted = 1;
  78    cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
  79}
  80
  81static inline void mips_tc_wake(MIPSCPU *cpu, int tc)
  82{
  83    CPUMIPSState *c = &cpu->env;
  84
  85    /* FIXME: TC reschedule.  */
  86    if (mips_vpe_active(c) && !mips_vpe_is_wfi(cpu)) {
  87        mips_vpe_wake(cpu);
  88    }
  89}
  90
  91static inline void mips_tc_sleep(MIPSCPU *cpu, int tc)
  92{
  93    CPUMIPSState *c = &cpu->env;
  94
  95    /* FIXME: TC reschedule.  */
  96    if (!mips_vpe_active(c)) {
  97        mips_vpe_sleep(cpu);
  98    }
  99}
 100
 101/**
 102 * mips_cpu_map_tc:
 103 * @env: CPU from which mapping is performed.
 104 * @tc: Should point to an int with the value of the global TC index.
 105 *
 106 * This function will transform @tc into a local index within the
 107 * returned #CPUMIPSState.
 108 */
 109
 110/*
 111 * FIXME: This code assumes that all VPEs have the same number of TCs,
 112 *        which depends on runtime setup. Can probably be fixed by
 113 *        walking the list of CPUMIPSStates.
 114 */
 115static CPUMIPSState *mips_cpu_map_tc(CPUMIPSState *env, int *tc)
 116{
 117    MIPSCPU *cpu;
 118    CPUState *cs;
 119    CPUState *other_cs;
 120    int vpe_idx;
 121    int tc_idx = *tc;
 122
 123    if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
 124        /* Not allowed to address other CPUs.  */
 125        *tc = env->current_tc;
 126        return env;
 127    }
 128
 129    cs = env_cpu(env);
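    /*
     * Split the global TC index into a VPE index plus a TC index local
     * to that VPE, assuming each VPE exposes cs->nr_threads TCs
     * (e.g. with 2 threads per VPE, global TC 3 -> VPE 1, local TC 1).
     */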
 130    vpe_idx = tc_idx / cs->nr_threads;
 131    *tc = tc_idx % cs->nr_threads;
 132    other_cs = qemu_get_cpu(vpe_idx);
 133    if (other_cs == NULL) {
 134        return env;
 135    }
 136    cpu = MIPS_CPU(other_cs);
 137    return &cpu->env;
 138}
 139
 140/*
 141 * The per VPE CP0_Status register shares some fields with the per TC
 142 * CP0_TCStatus registers. These fields are wired to the same registers,
 143 * so changes to either of them should be reflected on both registers.
 144 *
  145 * Also, EntryHi shares its bottom 8-bit ASID field with TCStatus.
  146 *
  147 * These helpers synchronize those registers for a given CPU.
 148 */
 149
 150/*
 151 * Called for updates to CP0_Status.  Defined in "cpu.h" for gdbstub.c.
 152 * static inline void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu,
 153 *                                   int tc);
 154 */
 155
 156/* Called for updates to CP0_TCStatus.  */
 157static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc,
 158                             target_ulong v)
 159{
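    /*
     * Copy the TCU, TMX and TKSU fields of the new TCStatus value into
     * the matching CU, MX and KSU fields of CP0_Status, and the TASID
     * field into EntryHi.ASID, per the sharing described above.
     */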
 160    uint32_t status;
 161    uint32_t tcu, tmx, tasid, tksu;
 162    uint32_t mask = ((1U << CP0St_CU3)
 163                       | (1 << CP0St_CU2)
 164                       | (1 << CP0St_CU1)
 165                       | (1 << CP0St_CU0)
 166                       | (1 << CP0St_MX)
 167                       | (3 << CP0St_KSU));
 168
 169    tcu = (v >> CP0TCSt_TCU0) & 0xf;
 170    tmx = (v >> CP0TCSt_TMX) & 0x1;
 171    tasid = v & cpu->CP0_EntryHi_ASID_mask;
 172    tksu = (v >> CP0TCSt_TKSU) & 0x3;
 173
 174    status = tcu << CP0St_CU0;
 175    status |= tmx << CP0St_MX;
 176    status |= tksu << CP0St_KSU;
 177
 178    cpu->CP0_Status &= ~mask;
 179    cpu->CP0_Status |= status;
 180
 181    /* Sync the TASID with EntryHi.  */
 182    cpu->CP0_EntryHi &= ~cpu->CP0_EntryHi_ASID_mask;
 183    cpu->CP0_EntryHi |= tasid;
 184
 185    compute_hflags(cpu);
 186}
 187
 188/* Called for updates to CP0_EntryHi.  */
 189static void sync_c0_entryhi(CPUMIPSState *cpu, int tc)
 190{
 191    int32_t *tcst;
 192    uint32_t asid, v = cpu->CP0_EntryHi;
 193
 194    asid = v & cpu->CP0_EntryHi_ASID_mask;
 195
 196    if (tc == cpu->current_tc) {
 197        tcst = &cpu->active_tc.CP0_TCStatus;
 198    } else {
 199        tcst = &cpu->tcs[tc].CP0_TCStatus;
 200    }
 201
 202    *tcst &= ~cpu->CP0_EntryHi_ASID_mask;
 203    *tcst |= asid;
 204}
 205
 206/* CP0 helpers */
 207target_ulong helper_mfc0_mvpcontrol(CPUMIPSState *env)
 208{
 209    return env->mvp->CP0_MVPControl;
 210}
 211
 212target_ulong helper_mfc0_mvpconf0(CPUMIPSState *env)
 213{
 214    return env->mvp->CP0_MVPConf0;
 215}
 216
 217target_ulong helper_mfc0_mvpconf1(CPUMIPSState *env)
 218{
 219    return env->mvp->CP0_MVPConf1;
 220}
 221
 222target_ulong helper_mfc0_random(CPUMIPSState *env)
 223{
 224    return (int32_t)cpu_mips_get_random(env);
 225}
 226
 227target_ulong helper_mfc0_tcstatus(CPUMIPSState *env)
 228{
 229    return env->active_tc.CP0_TCStatus;
 230}
 231
 232target_ulong helper_mftc0_tcstatus(CPUMIPSState *env)
 233{
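    /*
     * The TargTC field of VPEControl selects which thread context the
     * mftc0/mttc0 helpers operate on; mips_cpu_map_tc() resolves it to
     * the owning CPU and a TC index local to that CPU.
     */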
 234    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
 235    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
 236
 237    if (other_tc == other->current_tc) {
 238        return other->active_tc.CP0_TCStatus;
 239    } else {
 240        return other->tcs[other_tc].CP0_TCStatus;
 241    }
 242}
 243
 244target_ulong helper_mfc0_tcbind(CPUMIPSState *env)
 245{
 246    return env->active_tc.CP0_TCBind;
 247}
 248
 249target_ulong helper_mftc0_tcbind(CPUMIPSState *env)
 250{
 251    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
 252    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
 253
 254    if (other_tc == other->current_tc) {
 255        return other->active_tc.CP0_TCBind;
 256    } else {
 257        return other->tcs[other_tc].CP0_TCBind;
 258    }
 259}
 260
 261target_ulong helper_mfc0_tcrestart(CPUMIPSState *env)
 262{
 263    return env->active_tc.PC;
 264}
 265
 266target_ulong helper_mftc0_tcrestart(CPUMIPSState *env)
 267{
 268    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
 269    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
 270
 271    if (other_tc == other->current_tc) {
 272        return other->active_tc.PC;
 273    } else {
 274        return other->tcs[other_tc].PC;
 275    }
 276}
 277
 278target_ulong helper_mfc0_tchalt(CPUMIPSState *env)
 279{
 280    return env->active_tc.CP0_TCHalt;
 281}
 282
 283target_ulong helper_mftc0_tchalt(CPUMIPSState *env)
 284{
 285    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
 286    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
 287
 288    if (other_tc == other->current_tc) {
 289        return other->active_tc.CP0_TCHalt;
 290    } else {
 291        return other->tcs[other_tc].CP0_TCHalt;
 292    }
 293}
 294
 295target_ulong helper_mfc0_tccontext(CPUMIPSState *env)
 296{
 297    return env->active_tc.CP0_TCContext;
 298}
 299
 300target_ulong helper_mftc0_tccontext(CPUMIPSState *env)
 301{
 302    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
 303    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
 304
 305    if (other_tc == other->current_tc) {
 306        return other->active_tc.CP0_TCContext;
 307    } else {
 308        return other->tcs[other_tc].CP0_TCContext;
 309    }
 310}
 311
 312target_ulong helper_mfc0_tcschedule(CPUMIPSState *env)
 313{
 314    return env->active_tc.CP0_TCSchedule;
 315}
 316
 317target_ulong helper_mftc0_tcschedule(CPUMIPSState *env)
 318{
 319    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
 320    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
 321
 322    if (other_tc == other->current_tc) {
 323        return other->active_tc.CP0_TCSchedule;
 324    } else {
 325        return other->tcs[other_tc].CP0_TCSchedule;
 326    }
 327}
 328
 329target_ulong helper_mfc0_tcschefback(CPUMIPSState *env)
 330{
 331    return env->active_tc.CP0_TCScheFBack;
 332}
 333
 334target_ulong helper_mftc0_tcschefback(CPUMIPSState *env)
 335{
 336    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
 337    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
 338
 339    if (other_tc == other->current_tc) {
 340        return other->active_tc.CP0_TCScheFBack;
 341    } else {
 342        return other->tcs[other_tc].CP0_TCScheFBack;
 343    }
 344}
 345
 346target_ulong helper_mfc0_count(CPUMIPSState *env)
 347{
 348    return (int32_t)cpu_mips_get_count(env);
 349}
 350
 351target_ulong helper_mfc0_saar(CPUMIPSState *env)
 352{
 353    if ((env->CP0_SAARI & 0x3f) < 2) {
 354        return (int32_t) env->CP0_SAAR[env->CP0_SAARI & 0x3f];
 355    }
 356    return 0;
 357}
 358
 359target_ulong helper_mfhc0_saar(CPUMIPSState *env)
 360{
 361    if ((env->CP0_SAARI & 0x3f) < 2) {
 362        return env->CP0_SAAR[env->CP0_SAARI & 0x3f] >> 32;
 363    }
 364    return 0;
 365}
 366
 367target_ulong helper_mftc0_entryhi(CPUMIPSState *env)
 368{
 369    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
 370    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
 371
 372    return other->CP0_EntryHi;
 373}
 374
 375target_ulong helper_mftc0_cause(CPUMIPSState *env)
 376{
 377    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
 378    int32_t tccause;
 379    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
 380
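    /* CP0_Cause is per VPE, so both branches read the same register. */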
 381    if (other_tc == other->current_tc) {
 382        tccause = other->CP0_Cause;
 383    } else {
 384        tccause = other->CP0_Cause;
 385    }
 386
 387    return tccause;
 388}
 389
 390target_ulong helper_mftc0_status(CPUMIPSState *env)
 391{
 392    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
 393    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
 394
 395    return other->CP0_Status;
 396}
 397
 398target_ulong helper_mfc0_lladdr(CPUMIPSState *env)
 399{
 400    return (int32_t)(env->CP0_LLAddr >> env->CP0_LLAddr_shift);
 401}
 402
 403target_ulong helper_mfc0_maar(CPUMIPSState *env)
 404{
 405    return (int32_t) env->CP0_MAAR[env->CP0_MAARI];
 406}
 407
 408target_ulong helper_mfhc0_maar(CPUMIPSState *env)
 409{
 410    return env->CP0_MAAR[env->CP0_MAARI] >> 32;
 411}
 412
 413target_ulong helper_mfc0_watchlo(CPUMIPSState *env, uint32_t sel)
 414{
 415    return (int32_t)env->CP0_WatchLo[sel];
 416}
 417
 418target_ulong helper_mfc0_watchhi(CPUMIPSState *env, uint32_t sel)
 419{
 420    return (int32_t) env->CP0_WatchHi[sel];
 421}
 422
 423target_ulong helper_mfhc0_watchhi(CPUMIPSState *env, uint32_t sel)
 424{
 425    return env->CP0_WatchHi[sel] >> 32;
 426}
 427
 428target_ulong helper_mfc0_debug(CPUMIPSState *env)
 429{
 430    target_ulong t0 = env->CP0_Debug;
 431    if (env->hflags & MIPS_HFLAG_DM) {
 432        t0 |= 1 << CP0DB_DM;
 433    }
 434
 435    return t0;
 436}
 437
 438target_ulong helper_mftc0_debug(CPUMIPSState *env)
 439{
 440    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
 441    int32_t tcstatus;
 442    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
 443
 444    if (other_tc == other->current_tc) {
 445        tcstatus = other->active_tc.CP0_Debug_tcstatus;
 446    } else {
 447        tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;
 448    }
 449
 450    /* XXX: Might be wrong, check with EJTAG spec. */
 451    return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
 452            (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
 453}
 454
 455#if defined(TARGET_MIPS64)
 456target_ulong helper_dmfc0_tcrestart(CPUMIPSState *env)
 457{
 458    return env->active_tc.PC;
 459}
 460
 461target_ulong helper_dmfc0_tchalt(CPUMIPSState *env)
 462{
 463    return env->active_tc.CP0_TCHalt;
 464}
 465
 466target_ulong helper_dmfc0_tccontext(CPUMIPSState *env)
 467{
 468    return env->active_tc.CP0_TCContext;
 469}
 470
 471target_ulong helper_dmfc0_tcschedule(CPUMIPSState *env)
 472{
 473    return env->active_tc.CP0_TCSchedule;
 474}
 475
 476target_ulong helper_dmfc0_tcschefback(CPUMIPSState *env)
 477{
 478    return env->active_tc.CP0_TCScheFBack;
 479}
 480
 481target_ulong helper_dmfc0_lladdr(CPUMIPSState *env)
 482{
 483    return env->CP0_LLAddr >> env->CP0_LLAddr_shift;
 484}
 485
 486target_ulong helper_dmfc0_maar(CPUMIPSState *env)
 487{
 488    return env->CP0_MAAR[env->CP0_MAARI];
 489}
 490
 491target_ulong helper_dmfc0_watchlo(CPUMIPSState *env, uint32_t sel)
 492{
 493    return env->CP0_WatchLo[sel];
 494}
 495
 496target_ulong helper_dmfc0_watchhi(CPUMIPSState *env, uint32_t sel)
 497{
 498    return env->CP0_WatchHi[sel];
 499}
 500
 501target_ulong helper_dmfc0_saar(CPUMIPSState *env)
 502{
 503    if ((env->CP0_SAARI & 0x3f) < 2) {
 504        return env->CP0_SAAR[env->CP0_SAARI & 0x3f];
 505    }
 506    return 0;
 507}
 508#endif /* TARGET_MIPS64 */
 509
 510void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1)
 511{
 512    uint32_t index_p = env->CP0_Index & 0x80000000;
 513    uint32_t tlb_index = arg1 & 0x7fffffff;
 514    if (tlb_index < env->tlb->nb_tlb) {
 515        if (env->insn_flags & ISA_MIPS32R6) {
 516            index_p |= arg1 & 0x80000000;
 517        }
 518        env->CP0_Index = index_p | tlb_index;
 519    }
 520}
 521
 522void helper_mtc0_mvpcontrol(CPUMIPSState *env, target_ulong arg1)
 523{
 524    uint32_t mask = 0;
 525    uint32_t newval;
 526
 527    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
 528        mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
 529                (1 << CP0MVPCo_EVP);
 530    }
 531    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
 532        mask |= (1 << CP0MVPCo_STLB);
 533    }
 534    newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);
 535
 536    /* TODO: Enable/disable shared TLB, enable/disable VPEs. */
 537
 538    env->mvp->CP0_MVPControl = newval;
 539}
 540
 541void helper_mtc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
 542{
 543    uint32_t mask;
 544    uint32_t newval;
 545
 546    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
 547           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
 548    newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);
 549
 550    /*
 551     * Yield scheduler intercept not implemented.
 552     * Gating storage scheduler intercept not implemented.
 553     */
 554
 555    /* TODO: Enable/disable TCs. */
 556
 557    env->CP0_VPEControl = newval;
 558}
 559
 560void helper_mttc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
 561{
 562    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
 563    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
 564    uint32_t mask;
 565    uint32_t newval;
 566
 567    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
 568           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
 569    newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask);
 570
 571    /* TODO: Enable/disable TCs.  */
 572
 573    other->CP0_VPEControl = newval;
 574}
 575
 576target_ulong helper_mftc0_vpecontrol(CPUMIPSState *env)
 577{
 578    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
 579    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  580    /* FIXME: Mask away bits that read back as zero.  */
 581    return other->CP0_VPEControl;
 582}
 583
 584target_ulong helper_mftc0_vpeconf0(CPUMIPSState *env)
 585{
 586    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
 587    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
 588
 589    return other->CP0_VPEConf0;
 590}
 591
 592void helper_mtc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
 593{
 594    uint32_t mask = 0;
 595    uint32_t newval;
 596
 597    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
 598        if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA)) {
 599            mask |= (0xff << CP0VPEC0_XTC);
 600        }
 601        mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
 602    }
 603    newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);
 604
 605    /* TODO: TC exclusive handling due to ERL/EXL. */
 606
 607    env->CP0_VPEConf0 = newval;
 608}
 609
 610void helper_mttc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
 611{
 612    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
 613    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
 614    uint32_t mask = 0;
 615    uint32_t newval;
 616
 617    mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
 618    newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask);
 619
 620    /* TODO: TC exclusive handling due to ERL/EXL.  */
 621    other->CP0_VPEConf0 = newval;
 622}
 623
 624void helper_mtc0_vpeconf1(CPUMIPSState *env, target_ulong arg1)
 625{
 626    uint32_t mask = 0;
 627    uint32_t newval;
 628
 629    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
 630        mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
 631                (0xff << CP0VPEC1_NCP1);
 632    newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);
 633
 634    /* UDI not implemented. */
 635    /* CP2 not implemented. */
 636
 637    /* TODO: Handle FPU (CP1) binding. */
 638
 639    env->CP0_VPEConf1 = newval;
 640}
 641
 642void helper_mtc0_yqmask(CPUMIPSState *env, target_ulong arg1)
 643{
 644    /* Yield qualifier inputs not implemented. */
 645    env->CP0_YQMask = 0x00000000;
 646}
 647
 648void helper_mtc0_vpeopt(CPUMIPSState *env, target_ulong arg1)
 649{
 650    env->CP0_VPEOpt = arg1 & 0x0000ffff;
 651}
 652
 653#define MTC0_ENTRYLO_MASK(env) ((env->PAMask >> 6) & 0x3FFFFFFF)
 654
 655void helper_mtc0_entrylo0(CPUMIPSState *env, target_ulong arg1)
 656{
 657    /* 1k pages not implemented */
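    /*
     * RI/XI from the written value are taken only when enabled via
     * PageGrain.RIE/XIE, and are relocated to the CP0EnLo_RI/CP0EnLo_XI
     * positions of the internal 64-bit EntryLo representation.
     */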
 658    target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
 659    env->CP0_EntryLo0 = (arg1 & MTC0_ENTRYLO_MASK(env))
 660                        | (rxi << (CP0EnLo_XI - 30));
 661}
 662
 663#if defined(TARGET_MIPS64)
 664#define DMTC0_ENTRYLO_MASK(env) (env->PAMask >> 6)
 665
 666void helper_dmtc0_entrylo0(CPUMIPSState *env, uint64_t arg1)
 667{
 668    uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
 669    env->CP0_EntryLo0 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
 670}
 671#endif
 672
 673void helper_mtc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
 674{
 675    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
 676    uint32_t newval;
 677
 678    newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);
 679
 680    env->active_tc.CP0_TCStatus = newval;
 681    sync_c0_tcstatus(env, env->current_tc, newval);
 682}
 683
 684void helper_mttc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
 685{
 686    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
 687    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
 688
 689    if (other_tc == other->current_tc) {
 690        other->active_tc.CP0_TCStatus = arg1;
 691    } else {
 692        other->tcs[other_tc].CP0_TCStatus = arg1;
 693    }
 694    sync_c0_tcstatus(other, other_tc, arg1);
 695}
 696
 697void helper_mtc0_tcbind(CPUMIPSState *env, target_ulong arg1)
 698{
 699    uint32_t mask = (1 << CP0TCBd_TBE);
 700    uint32_t newval;
 701
 702    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
 703        mask |= (1 << CP0TCBd_CurVPE);
 704    }
 705    newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
 706    env->active_tc.CP0_TCBind = newval;
 707}
 708
 709void helper_mttc0_tcbind(CPUMIPSState *env, target_ulong arg1)
 710{
 711    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
 712    uint32_t mask = (1 << CP0TCBd_TBE);
 713    uint32_t newval;
 714    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
 715
 716    if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
 717        mask |= (1 << CP0TCBd_CurVPE);
 718    }
 719    if (other_tc == other->current_tc) {
 720        newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
 721        other->active_tc.CP0_TCBind = newval;
 722    } else {
 723        newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
 724        other->tcs[other_tc].CP0_TCBind = newval;
 725    }
 726}
 727
 728void helper_mtc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
 729{
 730    env->active_tc.PC = arg1;
 731    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
 732    env->CP0_LLAddr = 0;
 733    env->lladdr = 0;
 734    /* MIPS16 not implemented. */
 735}
 736
 737void helper_mttc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
 738{
 739    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
 740    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
 741
 742    if (other_tc == other->current_tc) {
 743        other->active_tc.PC = arg1;
 744        other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
 745        other->CP0_LLAddr = 0;
 746        other->lladdr = 0;
 747        /* MIPS16 not implemented. */
 748    } else {
 749        other->tcs[other_tc].PC = arg1;
 750        other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
 751        other->CP0_LLAddr = 0;
 752        other->lladdr = 0;
 753        /* MIPS16 not implemented. */
 754    }
 755}
 756
 757void helper_mtc0_tchalt(CPUMIPSState *env, target_ulong arg1)
 758{
 759    MIPSCPU *cpu = env_archcpu(env);
 760
 761    env->active_tc.CP0_TCHalt = arg1 & 0x1;
 762
 763    /* TODO: Halt TC / Restart (if allocated+active) TC. */
 764    if (env->active_tc.CP0_TCHalt & 1) {
 765        mips_tc_sleep(cpu, env->current_tc);
 766    } else {
 767        mips_tc_wake(cpu, env->current_tc);
 768    }
 769}
 770
 771void helper_mttc0_tchalt(CPUMIPSState *env, target_ulong arg1)
 772{
 773    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
 774    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
 775    MIPSCPU *other_cpu = env_archcpu(other);
 776
 777    /* TODO: Halt TC / Restart (if allocated+active) TC. */
 778
 779    if (other_tc == other->current_tc) {
 780        other->active_tc.CP0_TCHalt = arg1;
 781    } else {
 782        other->tcs[other_tc].CP0_TCHalt = arg1;
 783    }
 784
 785    if (arg1 & 1) {
 786        mips_tc_sleep(other_cpu, other_tc);
 787    } else {
 788        mips_tc_wake(other_cpu, other_tc);
 789    }
 790}
 791
 792void helper_mtc0_tccontext(CPUMIPSState *env, target_ulong arg1)
 793{
 794    env->active_tc.CP0_TCContext = arg1;
 795}
 796
 797void helper_mttc0_tccontext(CPUMIPSState *env, target_ulong arg1)
 798{
 799    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
 800    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
 801
 802    if (other_tc == other->current_tc) {
 803        other->active_tc.CP0_TCContext = arg1;
 804    } else {
 805        other->tcs[other_tc].CP0_TCContext = arg1;
 806    }
 807}
 808
 809void helper_mtc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
 810{
 811    env->active_tc.CP0_TCSchedule = arg1;
 812}
 813
 814void helper_mttc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
 815{
 816    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
 817    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
 818
 819    if (other_tc == other->current_tc) {
 820        other->active_tc.CP0_TCSchedule = arg1;
 821    } else {
 822        other->tcs[other_tc].CP0_TCSchedule = arg1;
 823    }
 824}
 825
 826void helper_mtc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
 827{
 828    env->active_tc.CP0_TCScheFBack = arg1;
 829}
 830
 831void helper_mttc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
 832{
 833    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
 834    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
 835
 836    if (other_tc == other->current_tc) {
 837        other->active_tc.CP0_TCScheFBack = arg1;
 838    } else {
 839        other->tcs[other_tc].CP0_TCScheFBack = arg1;
 840    }
 841}
 842
 843void helper_mtc0_entrylo1(CPUMIPSState *env, target_ulong arg1)
 844{
 845    /* 1k pages not implemented */
 846    target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
 847    env->CP0_EntryLo1 = (arg1 & MTC0_ENTRYLO_MASK(env))
 848                        | (rxi << (CP0EnLo_XI - 30));
 849}
 850
 851#if defined(TARGET_MIPS64)
 852void helper_dmtc0_entrylo1(CPUMIPSState *env, uint64_t arg1)
 853{
 854    uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
 855    env->CP0_EntryLo1 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
 856}
 857#endif
 858
 859void helper_mtc0_context(CPUMIPSState *env, target_ulong arg1)
 860{
 861    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
 862}
 863
 864void helper_mtc0_memorymapid(CPUMIPSState *env, target_ulong arg1)
 865{
 866    int32_t old;
 867    old = env->CP0_MemoryMapID;
 868    env->CP0_MemoryMapID = (int32_t) arg1;
 869    /* If the MemoryMapID changes, flush qemu's TLB.  */
 870    if (old != env->CP0_MemoryMapID) {
 871        cpu_mips_tlb_flush(env);
 872    }
 873}
 874
 875void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask)
 876{
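    /*
     * Pre-R6 cores accept any value; on R6 the new PageMask is latched
     * only when it is written as all ones or encodes one of the
     * architecturally valid page sizes checked below.
     */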
 877    uint64_t mask = arg1 >> (TARGET_PAGE_BITS + 1);
 878    if (!(env->insn_flags & ISA_MIPS32R6) || (arg1 == ~0) ||
 879        (mask == 0x0000 || mask == 0x0003 || mask == 0x000F ||
 880         mask == 0x003F || mask == 0x00FF || mask == 0x03FF ||
 881         mask == 0x0FFF || mask == 0x3FFF || mask == 0xFFFF)) {
 882        env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
 883    }
 884}
 885
 886void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1)
 887{
 888    update_pagemask(env, arg1, &env->CP0_PageMask);
 889}
 890
 891void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1)
 892{
 893    /* SmartMIPS not implemented */
 894    /* 1k pages not implemented */
 895    env->CP0_PageGrain = (arg1 & env->CP0_PageGrain_rw_bitmask) |
 896                         (env->CP0_PageGrain & ~env->CP0_PageGrain_rw_bitmask);
 897    compute_hflags(env);
 898    restore_pamask(env);
 899}
 900
 901void helper_mtc0_segctl0(CPUMIPSState *env, target_ulong arg1)
 902{
 903    CPUState *cs = env_cpu(env);
 904
 905    env->CP0_SegCtl0 = arg1 & CP0SC0_MASK;
 906    tlb_flush(cs);
 907}
 908
 909void helper_mtc0_segctl1(CPUMIPSState *env, target_ulong arg1)
 910{
 911    CPUState *cs = env_cpu(env);
 912
 913    env->CP0_SegCtl1 = arg1 & CP0SC1_MASK;
 914    tlb_flush(cs);
 915}
 916
 917void helper_mtc0_segctl2(CPUMIPSState *env, target_ulong arg1)
 918{
 919    CPUState *cs = env_cpu(env);
 920
 921    env->CP0_SegCtl2 = arg1 & CP0SC2_MASK;
 922    tlb_flush(cs);
 923}
 924
 925void helper_mtc0_pwfield(CPUMIPSState *env, target_ulong arg1)
 926{
 927#if defined(TARGET_MIPS64)
 928    uint64_t mask = 0x3F3FFFFFFFULL;
 929    uint32_t old_ptei = (env->CP0_PWField >> CP0PF_PTEI) & 0x3FULL;
 930    uint32_t new_ptei = (arg1 >> CP0PF_PTEI) & 0x3FULL;
 931
 932    if ((env->insn_flags & ISA_MIPS32R6)) {
 933        if (((arg1 >> CP0PF_BDI) & 0x3FULL) < 12) {
 934            mask &= ~(0x3FULL << CP0PF_BDI);
 935        }
 936        if (((arg1 >> CP0PF_GDI) & 0x3FULL) < 12) {
 937            mask &= ~(0x3FULL << CP0PF_GDI);
 938        }
 939        if (((arg1 >> CP0PF_UDI) & 0x3FULL) < 12) {
 940            mask &= ~(0x3FULL << CP0PF_UDI);
 941        }
 942        if (((arg1 >> CP0PF_MDI) & 0x3FULL) < 12) {
 943            mask &= ~(0x3FULL << CP0PF_MDI);
 944        }
 945        if (((arg1 >> CP0PF_PTI) & 0x3FULL) < 12) {
 946            mask &= ~(0x3FULL << CP0PF_PTI);
 947        }
 948    }
 949    env->CP0_PWField = arg1 & mask;
 950
 951    if ((new_ptei >= 32) ||
 952            ((env->insn_flags & ISA_MIPS32R6) &&
 953                    (new_ptei == 0 || new_ptei == 1))) {
 954        env->CP0_PWField = (env->CP0_PWField & ~0x3FULL) |
 955                (old_ptei << CP0PF_PTEI);
 956    }
 957#else
 958    uint32_t mask = 0x3FFFFFFF;
 959    uint32_t old_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
 960    uint32_t new_ptew = (arg1 >> CP0PF_PTEW) & 0x3F;
 961
 962    if ((env->insn_flags & ISA_MIPS32R6)) {
 963        if (((arg1 >> CP0PF_GDW) & 0x3F) < 12) {
 964            mask &= ~(0x3F << CP0PF_GDW);
 965        }
 966        if (((arg1 >> CP0PF_UDW) & 0x3F) < 12) {
 967            mask &= ~(0x3F << CP0PF_UDW);
 968        }
 969        if (((arg1 >> CP0PF_MDW) & 0x3F) < 12) {
 970            mask &= ~(0x3F << CP0PF_MDW);
 971        }
 972        if (((arg1 >> CP0PF_PTW) & 0x3F) < 12) {
 973            mask &= ~(0x3F << CP0PF_PTW);
 974        }
 975    }
 976    env->CP0_PWField = arg1 & mask;
 977
 978    if ((new_ptew >= 32) ||
 979            ((env->insn_flags & ISA_MIPS32R6) &&
 980                    (new_ptew == 0 || new_ptew == 1))) {
 981        env->CP0_PWField = (env->CP0_PWField & ~0x3F) |
 982                (old_ptew << CP0PF_PTEW);
 983    }
 984#endif
 985}
 986
 987void helper_mtc0_pwsize(CPUMIPSState *env, target_ulong arg1)
 988{
 989#if defined(TARGET_MIPS64)
 990    env->CP0_PWSize = arg1 & 0x3F7FFFFFFFULL;
 991#else
 992    env->CP0_PWSize = arg1 & 0x3FFFFFFF;
 993#endif
 994}
 995
 996void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1)
 997{
 998    if (env->insn_flags & ISA_MIPS32R6) {
 999        if (arg1 < env->tlb->nb_tlb) {
1000            env->CP0_Wired = arg1;
1001        }
1002    } else {
1003        env->CP0_Wired = arg1 % env->tlb->nb_tlb;
1004    }
1005}
1006
1007void helper_mtc0_pwctl(CPUMIPSState *env, target_ulong arg1)
1008{
1009#if defined(TARGET_MIPS64)
1010    /* PWEn = 0. Hardware page table walking is not implemented. */
1011    env->CP0_PWCtl = (env->CP0_PWCtl & 0x000000C0) | (arg1 & 0x5C00003F);
1012#else
1013    env->CP0_PWCtl = (arg1 & 0x800000FF);
1014#endif
1015}
1016
1017void helper_mtc0_srsconf0(CPUMIPSState *env, target_ulong arg1)
1018{
1019    env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
1020}
1021
1022void helper_mtc0_srsconf1(CPUMIPSState *env, target_ulong arg1)
1023{
1024    env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
1025}
1026
1027void helper_mtc0_srsconf2(CPUMIPSState *env, target_ulong arg1)
1028{
1029    env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
1030}
1031
1032void helper_mtc0_srsconf3(CPUMIPSState *env, target_ulong arg1)
1033{
1034    env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
1035}
1036
1037void helper_mtc0_srsconf4(CPUMIPSState *env, target_ulong arg1)
1038{
1039    env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
1040}
1041
1042void helper_mtc0_hwrena(CPUMIPSState *env, target_ulong arg1)
1043{
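    /*
     * RDHWR registers 0..3 are always selectable.  Bit 4 is writable on
     * R6 cores with performance counters (Config1.PC), bit 5 on any R6
     * core, and bit 29 exposes UserLocal when Config3.ULRI is
     * implemented; MIPS_HFLAG_HWRENA_ULR is kept in sync with bit 29.
     */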
1044    uint32_t mask = 0x0000000F;
1045
1046    if ((env->CP0_Config1 & (1 << CP0C1_PC)) &&
1047        (env->insn_flags & ISA_MIPS32R6)) {
1048        mask |= (1 << 4);
1049    }
1050    if (env->insn_flags & ISA_MIPS32R6) {
1051        mask |= (1 << 5);
1052    }
1053    if (env->CP0_Config3 & (1 << CP0C3_ULRI)) {
1054        mask |= (1 << 29);
1055
1056        if (arg1 & (1 << 29)) {
1057            env->hflags |= MIPS_HFLAG_HWRENA_ULR;
1058        } else {
1059            env->hflags &= ~MIPS_HFLAG_HWRENA_ULR;
1060        }
1061    }
1062
1063    env->CP0_HWREna = arg1 & mask;
1064}
1065
1066void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1)
1067{
1068    cpu_mips_store_count(env, arg1);
1069}
1070
1071void helper_mtc0_saari(CPUMIPSState *env, target_ulong arg1)
1072{
1073    uint32_t target = arg1 & 0x3f;
1074    if (target <= 1) {
1075        env->CP0_SAARI = target;
1076    }
1077}
1078
1079void helper_mtc0_saar(CPUMIPSState *env, target_ulong arg1)
1080{
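    /*
     * Only SAAR0 and SAAR1 are implemented; a write to SAAR0 also
     * triggers an ITU reconfiguration when an ITU is present.
     */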
1081    uint32_t target = env->CP0_SAARI & 0x3f;
1082    if (target < 2) {
1083        env->CP0_SAAR[target] = arg1 & 0x00000ffffffff03fULL;
1084        switch (target) {
1085        case 0:
1086            if (env->itu) {
1087                itc_reconfigure(env->itu);
1088            }
1089            break;
1090        }
1091    }
1092}
1093
1094void helper_mthc0_saar(CPUMIPSState *env, target_ulong arg1)
1095{
1096    uint32_t target = env->CP0_SAARI & 0x3f;
1097    if (target < 2) {
1098        env->CP0_SAAR[target] =
1099            (((uint64_t) arg1 << 32) & 0x00000fff00000000ULL) |
1100            (env->CP0_SAAR[target] & 0x00000000ffffffffULL);
1101        switch (target) {
1102        case 0:
1103            if (env->itu) {
1104                itc_reconfigure(env->itu);
1105            }
1106            break;
1107        }
1108    }
1109}
1110
1111void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1)
1112{
1113    target_ulong old, val, mask;
1114    mask = (TARGET_PAGE_MASK << 1) | env->CP0_EntryHi_ASID_mask;
1115    if (((env->CP0_Config4 >> CP0C4_IE) & 0x3) >= 2) {
1116        mask |= 1 << CP0EnHi_EHINV;
1117    }
1118
1119    /* 1k pages not implemented */
1120#if defined(TARGET_MIPS64)
1121    if (env->insn_flags & ISA_MIPS32R6) {
1122        int entryhi_r = extract64(arg1, 62, 2);
1123        int config0_at = extract32(env->CP0_Config0, 13, 2);
1124        bool no_supervisor = (env->CP0_Status_rw_bitmask & 0x8) == 0;
1125        if ((entryhi_r == 2) ||
1126            (entryhi_r == 1 && (no_supervisor || config0_at == 1))) {
1127            /* skip EntryHi.R field if new value is reserved */
1128            mask &= ~(0x3ull << 62);
1129        }
1130    }
1131    mask &= env->SEGMask;
1132#endif
1133    old = env->CP0_EntryHi;
1134    val = (arg1 & mask) | (old & ~mask);
1135    env->CP0_EntryHi = val;
1136    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
1137        sync_c0_entryhi(env, env->current_tc);
1138    }
1139    /* If the ASID changes, flush qemu's TLB.  */
1140    if ((old & env->CP0_EntryHi_ASID_mask) !=
1141        (val & env->CP0_EntryHi_ASID_mask)) {
1142        tlb_flush(env_cpu(env));
1143    }
1144}
1145
1146void helper_mttc0_entryhi(CPUMIPSState *env, target_ulong arg1)
1147{
1148    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1149    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1150
1151    other->CP0_EntryHi = arg1;
1152    sync_c0_entryhi(other, other_tc);
1153}
1154
1155void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1)
1156{
1157    cpu_mips_store_compare(env, arg1);
1158}
1159
1160void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1)
1161{
1162    uint32_t val, old;
1163
1164    old = env->CP0_Status;
1165    cpu_mips_store_status(env, arg1);
1166    val = env->CP0_Status;
1167
1168    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1169        qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
1170                old, old & env->CP0_Cause & CP0Ca_IP_mask,
1171                val, val & env->CP0_Cause & CP0Ca_IP_mask,
1172                env->CP0_Cause);
1173        switch (cpu_mmu_index(env, false)) {
1174        case 3:
1175            qemu_log(", ERL\n");
1176            break;
1177        case MIPS_HFLAG_UM:
1178            qemu_log(", UM\n");
1179            break;
1180        case MIPS_HFLAG_SM:
1181            qemu_log(", SM\n");
1182            break;
1183        case MIPS_HFLAG_KM:
1184            qemu_log("\n");
1185            break;
1186        default:
1187            cpu_abort(env_cpu(env), "Invalid MMU mode!\n");
1188            break;
1189        }
1190    }
1191}
1192
1193void helper_mttc0_status(CPUMIPSState *env, target_ulong arg1)
1194{
1195    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1196    uint32_t mask = env->CP0_Status_rw_bitmask & ~0xf1000018;
1197    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1198
1199    other->CP0_Status = (other->CP0_Status & ~mask) | (arg1 & mask);
1200    sync_c0_status(env, other, other_tc);
1201}
1202
1203void helper_mtc0_intctl(CPUMIPSState *env, target_ulong arg1)
1204{
1205    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0);
1206}
1207
1208void helper_mtc0_srsctl(CPUMIPSState *env, target_ulong arg1)
1209{
1210    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
1211    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
1212}
1213
1214void helper_mtc0_cause(CPUMIPSState *env, target_ulong arg1)
1215{
1216    cpu_mips_store_cause(env, arg1);
1217}
1218
1219void helper_mttc0_cause(CPUMIPSState *env, target_ulong arg1)
1220{
1221    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1222    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1223
1224    cpu_mips_store_cause(other, arg1);
1225}
1226
1227target_ulong helper_mftc0_epc(CPUMIPSState *env)
1228{
1229    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1230    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1231
1232    return other->CP0_EPC;
1233}
1234
1235target_ulong helper_mftc0_ebase(CPUMIPSState *env)
1236{
1237    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1238    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1239
1240    return other->CP0_EBase;
1241}
1242
1243void helper_mtc0_ebase(CPUMIPSState *env, target_ulong arg1)
1244{
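    /*
     * Only EBase[29:12] is normally writable; when the implementation
     * has WG (write gate) bits and the guest sets one, the upper
     * address bits become writable as well.
     */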
1245    target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
1246    if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
1247        mask |= ~0x3FFFFFFF;
1248    }
1249    env->CP0_EBase = (env->CP0_EBase & ~mask) | (arg1 & mask);
1250}
1251
1252void helper_mttc0_ebase(CPUMIPSState *env, target_ulong arg1)
1253{
1254    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1255    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1256    target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
1257    if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
1258        mask |= ~0x3FFFFFFF;
1259    }
1260    other->CP0_EBase = (other->CP0_EBase & ~mask) | (arg1 & mask);
1261}
1262
1263target_ulong helper_mftc0_configx(CPUMIPSState *env, target_ulong idx)
1264{
1265    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1266    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1267
1268    switch (idx) {
1269    case 0: return other->CP0_Config0;
1270    case 1: return other->CP0_Config1;
1271    case 2: return other->CP0_Config2;
1272    case 3: return other->CP0_Config3;
1273    /* 4 and 5 are reserved.  */
1274    case 6: return other->CP0_Config6;
1275    case 7: return other->CP0_Config7;
1276    default:
1277        break;
1278    }
1279    return 0;
1280}
1281
1282void helper_mtc0_config0(CPUMIPSState *env, target_ulong arg1)
1283{
1284    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
1285}
1286
1287void helper_mtc0_config2(CPUMIPSState *env, target_ulong arg1)
1288{
1289    /* tertiary/secondary caches not implemented */
1290    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
1291}
1292
1293void helper_mtc0_config3(CPUMIPSState *env, target_ulong arg1)
1294{
1295    if (env->insn_flags & ASE_MICROMIPS) {
1296        env->CP0_Config3 = (env->CP0_Config3 & ~(1 << CP0C3_ISA_ON_EXC)) |
1297                           (arg1 & (1 << CP0C3_ISA_ON_EXC));
1298    }
1299}
1300
1301void helper_mtc0_config4(CPUMIPSState *env, target_ulong arg1)
1302{
1303    env->CP0_Config4 = (env->CP0_Config4 & (~env->CP0_Config4_rw_bitmask)) |
1304                       (arg1 & env->CP0_Config4_rw_bitmask);
1305}
1306
1307void helper_mtc0_config5(CPUMIPSState *env, target_ulong arg1)
1308{
1309    env->CP0_Config5 = (env->CP0_Config5 & (~env->CP0_Config5_rw_bitmask)) |
1310                       (arg1 & env->CP0_Config5_rw_bitmask);
1311    env->CP0_EntryHi_ASID_mask = (env->CP0_Config5 & (1 << CP0C5_MI)) ?
1312            0x0 : (env->CP0_Config4 & (1 << CP0C4_AE)) ? 0x3ff : 0xff;
1313    compute_hflags(env);
1314}
1315
1316void helper_mtc0_lladdr(CPUMIPSState *env, target_ulong arg1)
1317{
1318    target_long mask = env->CP0_LLAddr_rw_bitmask;
1319    arg1 = arg1 << env->CP0_LLAddr_shift;
1320    env->CP0_LLAddr = (env->CP0_LLAddr & ~mask) | (arg1 & mask);
1321}
1322
1323#define MTC0_MAAR_MASK(env) \
1324        ((0x1ULL << 63) | ((env->PAMask >> 4) & ~0xFFFull) | 0x3)
1325
1326void helper_mtc0_maar(CPUMIPSState *env, target_ulong arg1)
1327{
1328    env->CP0_MAAR[env->CP0_MAARI] = arg1 & MTC0_MAAR_MASK(env);
1329}
1330
1331void helper_mthc0_maar(CPUMIPSState *env, target_ulong arg1)
1332{
1333    env->CP0_MAAR[env->CP0_MAARI] =
1334        (((uint64_t) arg1 << 32) & MTC0_MAAR_MASK(env)) |
1335        (env->CP0_MAAR[env->CP0_MAARI] & 0x00000000ffffffffULL);
1336}
1337
1338void helper_mtc0_maari(CPUMIPSState *env, target_ulong arg1)
1339{
1340    int index = arg1 & 0x3f;
1341    if (index == 0x3f) {
1342        /*
1343         * Software may write all ones to INDEX to determine the
1344         *  maximum value supported.
1345         */
1346        env->CP0_MAARI = MIPS_MAAR_MAX - 1;
1347    } else if (index < MIPS_MAAR_MAX) {
1348        env->CP0_MAARI = index;
1349    }
1350    /*
 1351     * Apart from the all-ones case, writing an unsupported value
 1352     * leaves INDEX unchanged from its previous value.
1353     */
1354}
1355
1356void helper_mtc0_watchlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
1357{
1358    /*
1359     * Watch exceptions for instructions, data loads, data stores
1360     * not implemented.
1361     */
1362    env->CP0_WatchLo[sel] = (arg1 & ~0x7);
1363}
1364
1365void helper_mtc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
1366{
1367    uint64_t mask = 0x40000FF8 | (env->CP0_EntryHi_ASID_mask << CP0WH_ASID);
1368    if ((env->CP0_Config5 >> CP0C5_MI) & 1) {
1369        mask |= 0xFFFFFFFF00000000ULL; /* MMID */
1370    }
1371    env->CP0_WatchHi[sel] = arg1 & mask;
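    /* The low W, R and I status bits are write-one-to-clear. */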
1372    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
1373}
1374
1375void helper_mthc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
1376{
1377    env->CP0_WatchHi[sel] = ((uint64_t) (arg1) << 32) |
1378                            (env->CP0_WatchHi[sel] & 0x00000000ffffffffULL);
1379}
1380
1381void helper_mtc0_xcontext(CPUMIPSState *env, target_ulong arg1)
1382{
1383    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
1384    env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
1385}
1386
1387void helper_mtc0_framemask(CPUMIPSState *env, target_ulong arg1)
1388{
1389    env->CP0_Framemask = arg1; /* XXX */
1390}
1391
1392void helper_mtc0_debug(CPUMIPSState *env, target_ulong arg1)
1393{
1394    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
1395    if (arg1 & (1 << CP0DB_DM)) {
1396        env->hflags |= MIPS_HFLAG_DM;
1397    } else {
1398        env->hflags &= ~MIPS_HFLAG_DM;
1399    }
1400}
1401
1402void helper_mttc0_debug(CPUMIPSState *env, target_ulong arg1)
1403{
1404    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1405    uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
1406    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1407
1408    /* XXX: Might be wrong, check with EJTAG spec. */
1409    if (other_tc == other->current_tc) {
1410        other->active_tc.CP0_Debug_tcstatus = val;
1411    } else {
1412        other->tcs[other_tc].CP0_Debug_tcstatus = val;
1413    }
1414    other->CP0_Debug = (other->CP0_Debug &
1415                     ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1416                     (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
1417}
1418
1419void helper_mtc0_performance0(CPUMIPSState *env, target_ulong arg1)
1420{
1421    env->CP0_Performance0 = arg1 & 0x000007ff;
1422}
1423
1424void helper_mtc0_errctl(CPUMIPSState *env, target_ulong arg1)
1425{
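    /*
     * Only the WST, SPR and (when ITC tags exist) ITC bits are retained.
     * CACHE ops are redirected to ITC storage only when ITC is set while
     * WST and SPR are both clear, tracked via MIPS_HFLAG_ITC_CACHE.
     */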
1426    int32_t wst = arg1 & (1 << CP0EC_WST);
1427    int32_t spr = arg1 & (1 << CP0EC_SPR);
1428    int32_t itc = env->itc_tag ? (arg1 & (1 << CP0EC_ITC)) : 0;
1429
1430    env->CP0_ErrCtl = wst | spr | itc;
1431
1432    if (itc && !wst && !spr) {
1433        env->hflags |= MIPS_HFLAG_ITC_CACHE;
1434    } else {
1435        env->hflags &= ~MIPS_HFLAG_ITC_CACHE;
1436    }
1437}
1438
1439void helper_mtc0_taglo(CPUMIPSState *env, target_ulong arg1)
1440{
1441    if (env->hflags & MIPS_HFLAG_ITC_CACHE) {
1442        /*
1443         * If CACHE instruction is configured for ITC tags then make all
1444         * CP0.TagLo bits writable. The actual write to ITC Configuration
1445         * Tag will take care of the read-only bits.
1446         */
1447        env->CP0_TagLo = arg1;
1448    } else {
1449        env->CP0_TagLo = arg1 & 0xFFFFFCF6;
1450    }
1451}
1452
1453void helper_mtc0_datalo(CPUMIPSState *env, target_ulong arg1)
1454{
1455    env->CP0_DataLo = arg1; /* XXX */
1456}
1457
1458void helper_mtc0_taghi(CPUMIPSState *env, target_ulong arg1)
1459{
1460    env->CP0_TagHi = arg1; /* XXX */
1461}
1462
1463void helper_mtc0_datahi(CPUMIPSState *env, target_ulong arg1)
1464{
1465    env->CP0_DataHi = arg1; /* XXX */
1466}
1467
1468/* MIPS MT functions */
1469target_ulong helper_mftgpr(CPUMIPSState *env, uint32_t sel)
1470{
1471    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1472    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1473
1474    if (other_tc == other->current_tc) {
1475        return other->active_tc.gpr[sel];
1476    } else {
1477        return other->tcs[other_tc].gpr[sel];
1478    }
1479}
1480
1481target_ulong helper_mftlo(CPUMIPSState *env, uint32_t sel)
1482{
1483    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1484    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1485
1486    if (other_tc == other->current_tc) {
1487        return other->active_tc.LO[sel];
1488    } else {
1489        return other->tcs[other_tc].LO[sel];
1490    }
1491}
1492
1493target_ulong helper_mfthi(CPUMIPSState *env, uint32_t sel)
1494{
1495    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1496    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1497
1498    if (other_tc == other->current_tc) {
1499        return other->active_tc.HI[sel];
1500    } else {
1501        return other->tcs[other_tc].HI[sel];
1502    }
1503}
1504
1505target_ulong helper_mftacx(CPUMIPSState *env, uint32_t sel)
1506{
1507    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1508    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1509
1510    if (other_tc == other->current_tc) {
1511        return other->active_tc.ACX[sel];
1512    } else {
1513        return other->tcs[other_tc].ACX[sel];
1514    }
1515}
1516
1517target_ulong helper_mftdsp(CPUMIPSState *env)
1518{
1519    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1520    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1521
1522    if (other_tc == other->current_tc) {
1523        return other->active_tc.DSPControl;
1524    } else {
1525        return other->tcs[other_tc].DSPControl;
1526    }
1527}
1528
1529void helper_mttgpr(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
1530{
1531    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1532    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1533
1534    if (other_tc == other->current_tc) {
1535        other->active_tc.gpr[sel] = arg1;
1536    } else {
1537        other->tcs[other_tc].gpr[sel] = arg1;
1538    }
1539}
1540
1541void helper_mttlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
1542{
1543    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1544    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1545
1546    if (other_tc == other->current_tc) {
1547        other->active_tc.LO[sel] = arg1;
1548    } else {
1549        other->tcs[other_tc].LO[sel] = arg1;
1550    }
1551}
1552
1553void helper_mtthi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
1554{
1555    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1556    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1557
1558    if (other_tc == other->current_tc) {
1559        other->active_tc.HI[sel] = arg1;
1560    } else {
1561        other->tcs[other_tc].HI[sel] = arg1;
1562    }
1563}
1564
1565void helper_mttacx(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
1566{
1567    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1568    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1569
1570    if (other_tc == other->current_tc) {
1571        other->active_tc.ACX[sel] = arg1;
1572    } else {
1573        other->tcs[other_tc].ACX[sel] = arg1;
1574    }
1575}
1576
1577void helper_mttdsp(CPUMIPSState *env, target_ulong arg1)
1578{
1579    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1580    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1581
1582    if (other_tc == other->current_tc) {
1583        other->active_tc.DSPControl = arg1;
1584    } else {
1585        other->tcs[other_tc].DSPControl = arg1;
1586    }
1587}
1588
1589/* MIPS MT functions */
1590target_ulong helper_dmt(void)
1591{
1592    /* TODO */
1593    return 0;
1594}
1595
1596target_ulong helper_emt(void)
1597{
1598    /* TODO */
1599    return 0;
1600}
1601
1602target_ulong helper_dvpe(CPUMIPSState *env)
1603{
1604    CPUState *other_cs = first_cpu;
1605    target_ulong prev = env->mvp->CP0_MVPControl;
1606
1607    CPU_FOREACH(other_cs) {
1608        MIPSCPU *other_cpu = MIPS_CPU(other_cs);
1609        /* Turn off all VPEs except the one executing the dvpe.  */
1610        if (&other_cpu->env != env) {
1611            other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
1612            mips_vpe_sleep(other_cpu);
1613        }
1614    }
1615    return prev;
1616}
1617
1618target_ulong helper_evpe(CPUMIPSState *env)
1619{
1620    CPUState *other_cs = first_cpu;
1621    target_ulong prev = env->mvp->CP0_MVPControl;
1622
1623    CPU_FOREACH(other_cs) {
1624        MIPSCPU *other_cpu = MIPS_CPU(other_cs);
1625
1626        if (&other_cpu->env != env
1627            /* If the VPE is WFI, don't disturb its sleep.  */
1628            && !mips_vpe_is_wfi(other_cpu)) {
1629            /* Enable the VPE.  */
1630            other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
1631            mips_vpe_wake(other_cpu); /* And wake it up.  */
1632        }
1633    }
1634    return prev;
1635}
1636#endif /* !CONFIG_USER_ONLY */
1637
1638/* R6 Multi-threading */
1639#ifndef CONFIG_USER_ONLY
1640target_ulong helper_dvp(CPUMIPSState *env)
1641{
1642    CPUState *other_cs = first_cpu;
1643    target_ulong prev = env->CP0_VPControl;
1644
1645    if (!((env->CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
1646        CPU_FOREACH(other_cs) {
1647            MIPSCPU *other_cpu = MIPS_CPU(other_cs);
1648            /* Turn off all VPs except the one executing the dvp. */
1649            if (&other_cpu->env != env) {
1650                mips_vpe_sleep(other_cpu);
1651            }
1652        }
1653        env->CP0_VPControl |= (1 << CP0VPCtl_DIS);
1654    }
1655    return prev;
1656}
1657
1658target_ulong helper_evp(CPUMIPSState *env)
1659{
1660    CPUState *other_cs = first_cpu;
1661    target_ulong prev = env->CP0_VPControl;
1662
1663    if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
1664        CPU_FOREACH(other_cs) {
1665            MIPSCPU *other_cpu = MIPS_CPU(other_cs);
1666            if ((&other_cpu->env != env) && !mips_vp_is_wfi(other_cpu)) {
1667                /*
1668                 * If the VP is WFI, don't disturb its sleep.
1669                 * Otherwise, wake it up.
1670                 */
1671                mips_vpe_wake(other_cpu);
1672            }
1673        }
1674        env->CP0_VPControl &= ~(1 << CP0VPCtl_DIS);
1675    }
1676    return prev;
1677}
1678#endif /* !CONFIG_USER_ONLY */
1679