linux/arch/powerpc/platforms/cell/spufs/switch.c
/*
 * spu_switch.c
 *
 * (C) Copyright IBM Corp. 2005
 *
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * Host-side part of SPU context switch sequence outlined in
 * Synergistic Processor Element, Book IV.
 *
 * A fully preemptive switch of an SPE is very expensive in terms
 * of time and system resources.  SPE Book IV indicates that SPE
 * allocation should follow a "serially reusable device" model,
 * in which the SPE is assigned a task until it completes.  When
 * this is not possible, this sequence may be used to preemptively
 * save, and then later (optionally) restore the context of a
 * program executing on an SPE.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>

#include "spufs.h"

#include "spu_save_dump.h"
#include "spu_restore_dump.h"

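/*
 * Each of the helpers below implements one numbered step of the Book IV
 * save and restore sequences.  They are invoked, in order, by the
 * spu_save() and spu_restore() entry points defined later in this file.
 * A rough sketch of a caller (illustrative only, not verbatim):
 *
 *      struct spu_state csa;
 *
 *      if (spu_save(&csa, spu))        ... quiesce the SPE, save to CSA
 *              ...handle failed save...
 *      if (spu_restore(&csa, spu))     ... reload from CSA and restart
 *              ...handle failed restore...
 */
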
#if 0
#define POLL_WHILE_TRUE(_c) {                           \
    do {                                                \
    } while (_c);                                       \
  }
#else
#define RELAX_SPIN_COUNT                                1000
#define POLL_WHILE_TRUE(_c) {                           \
    do {                                                \
        int _i;                                         \
        for (_i=0; _i<RELAX_SPIN_COUNT && (_c); _i++) { \
            cpu_relax();                                \
        }                                               \
        if (unlikely(_c)) yield();                      \
        else break;                                     \
    } while (_c);                                       \
  }
#endif                          /* debug */

#define POLL_WHILE_FALSE(_c)    POLL_WHILE_TRUE(!(_c))

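/*
 * Example: the idiom used throughout this file to wait for the SPU
 * to leave the running state is
 *
 *      POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
 *
 * which spins with cpu_relax() for up to RELAX_SPIN_COUNT iterations
 * and then yield()s the processor for as long as the condition holds.
 */
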
static inline void acquire_spu_lock(struct spu *spu)
{
        /* Save, Step 1:
         * Restore, Step 1:
         *    Acquire SPU-specific mutual exclusion lock.
         *    TBD.
         */
}

static inline void release_spu_lock(struct spu *spu)
{
        /* Restore, Step 76:
         *    Release SPU-specific mutual exclusion lock.
         *    TBD.
         */
}

static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        u32 isolate_state;

        /* Save, Step 2:
         * Save, Step 6:
         *     If any field of SPU_Status[E,L,IS] is '1', this
         *     SPU is in isolate state and cannot be context
         *     saved at this time.
         */
        isolate_state = SPU_STATUS_ISOLATED_STATE |
            SPU_STATUS_ISOLATED_LOAD_STATUS | SPU_STATUS_ISOLATED_EXIT_STATUS;
        return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;
}

static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 3:
         * Restore, Step 2:
         *     Save INT_Mask_class0 in CSA.
         *     Write INT_MASK_class0 with value of 0.
         *     Save INT_Mask_class1 in CSA.
         *     Write INT_MASK_class1 with value of 0.
         *     Save INT_Mask_class2 in CSA.
         *     Write INT_MASK_class2 with value of 0.
         *     Synchronize all three interrupts to be sure
         *     we no longer execute a handler on another CPU.
         */
        spin_lock_irq(&spu->register_lock);
        if (csa) {
                csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0);
                csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1);
                csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2);
        }
        spu_int_mask_set(spu, 0, 0ul);
        spu_int_mask_set(spu, 1, 0ul);
        spu_int_mask_set(spu, 2, 0ul);
        eieio();
        spin_unlock_irq(&spu->register_lock);

        /*
         * This flag needs to be set before calling synchronize_irq so
         * that the update will be visible to the relevant handlers
         * via a simple load.
         */
        set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
        clear_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
        synchronize_irq(spu->irqs[0]);
        synchronize_irq(spu->irqs[1]);
        synchronize_irq(spu->irqs[2]);
}

static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 4:
         * Restore, Step 25.
         *    Set a software watchdog timer, which specifies the
         *    maximum allowable time for a context save sequence.
         *
         *    For the present, this implementation will not set a
         *    global watchdog timer, as virtualization and variable
         *    system load may cause unpredictable execution times.
         */
}

static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 5:
         * Restore, Step 3:
         *     Inhibit user-space access (if provided) to this
         *     SPU by unmapping the virtual pages assigned to
         *     the SPU memory-mapped I/O (MMIO) for problem
         *     state. TBD.
         */
}

static inline void set_switch_pending(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 7:
         * Restore, Step 5:
         *     Set a software context switch pending flag.
         *     Done above in Step 3 - disable_interrupts().
         */
}

static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 8:
         *     Suspend DMA and save MFC_CNTL.
         */
        switch (in_be64(&priv2->mfc_control_RW) &
               MFC_CNTL_SUSPEND_DMA_STATUS_MASK) {
        case MFC_CNTL_SUSPEND_IN_PROGRESS:
                POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
                                  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
                                 MFC_CNTL_SUSPEND_COMPLETE);
                /* fall through */
        case MFC_CNTL_SUSPEND_COMPLETE:
                if (csa)
                        csa->priv2.mfc_control_RW =
                                in_be64(&priv2->mfc_control_RW) |
                                MFC_CNTL_SUSPEND_DMA_QUEUE;
                break;
        case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION:
                out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
                POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
                                  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
                                 MFC_CNTL_SUSPEND_COMPLETE);
                if (csa)
                        csa->priv2.mfc_control_RW =
                                in_be64(&priv2->mfc_control_RW) &
                                ~MFC_CNTL_SUSPEND_DMA_QUEUE &
                                ~MFC_CNTL_SUSPEND_MASK;
                break;
        }
}

static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 9:
         *     Save SPU_Runcntl in the CSA.  This value contains
         *     the "Application Desired State".
         */
        csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW);
}

static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 10:
         *     Save MFC_SR1 in the CSA.
         */
        csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu);
}

static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 11:
         *     Read SPU_Status[R], and save to CSA.
         */
        if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) {
                csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
        } else {
                u32 stopped;

                out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
                eieio();
                POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                SPU_STATUS_RUNNING);
                stopped =
                    SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
                    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
                if ((in_be32(&prob->spu_status_R) & stopped) == 0)
                        csa->prob.spu_status_R = SPU_STATUS_RUNNING;
                else
                        csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
        }
}

static inline void save_mfc_stopped_status(struct spu_state *csa,
                struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        const u64 mask = MFC_CNTL_DECREMENTER_RUNNING |
                        MFC_CNTL_DMA_QUEUES_EMPTY;

        /* Save, Step 12:
         *     Read MFC_CNTL[Ds].  Update saved copy of
         *     CSA.MFC_CNTL[Ds].
         *
         * update: do the same with MFC_CNTL[Q].
         */
        csa->priv2.mfc_control_RW &= ~mask;
        csa->priv2.mfc_control_RW |= in_be64(&priv2->mfc_control_RW) & mask;
}

static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 13:
         *     Write MFC_CNTL[Dh] set to a '1' to halt
         *     the decrementer.
         */
        out_be64(&priv2->mfc_control_RW,
                 MFC_CNTL_DECREMENTER_HALTED | MFC_CNTL_SUSPEND_MASK);
        eieio();
}

static inline void save_timebase(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 14:
         *    Read PPE Timebase High and Timebase low registers
         *    and save in CSA.  TBD.
         */
        csa->suspend_time = get_cycles();
}

static inline void remove_other_spu_access(struct spu_state *csa,
                                           struct spu *spu)
{
        /* Save, Step 15:
         *     Remove other SPU access to this SPU by unmapping
         *     this SPU's pages from their address space.  TBD.
         */
}

static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 16:
         * Restore, Step 11.
         *     Write SPU_MSSync register. Poll SPU_MSSync[P]
         *     for a value of 0.
         */
        out_be64(&prob->spc_mssync_RW, 1UL);
        POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING);
}

static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 17:
         * Restore, Step 12.
         * Restore, Step 48.
         *     Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register.
         *     Then issue a PPE sync instruction.
         */
        spu_tlb_invalidate(spu);
        mb();
}

static inline void handle_pending_interrupts(struct spu_state *csa,
                                             struct spu *spu)
{
        /* Save, Step 18:
         *     Handle any pending interrupts from this SPU
         *     here.  This is OS or hypervisor specific.  One
         *     option is to re-enable interrupts to handle any
         *     pending interrupts, with the interrupt handlers
         *     recognizing the software Context Switch Pending
         *     flag, to ensure the SPU execution or MFC command
         *     queue is not restarted.  TBD.
         */
}

static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        int i;

        /* Save, Step 19:
         *     If MFC_Cntl[Se]=0 then save
         *     MFC command queues.
         */
        if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) {
                for (i = 0; i < 8; i++) {
                        csa->priv2.puq[i].mfc_cq_data0_RW =
                            in_be64(&priv2->puq[i].mfc_cq_data0_RW);
                        csa->priv2.puq[i].mfc_cq_data1_RW =
                            in_be64(&priv2->puq[i].mfc_cq_data1_RW);
                        csa->priv2.puq[i].mfc_cq_data2_RW =
                            in_be64(&priv2->puq[i].mfc_cq_data2_RW);
                        csa->priv2.puq[i].mfc_cq_data3_RW =
                            in_be64(&priv2->puq[i].mfc_cq_data3_RW);
                }
                for (i = 0; i < 16; i++) {
                        csa->priv2.spuq[i].mfc_cq_data0_RW =
                            in_be64(&priv2->spuq[i].mfc_cq_data0_RW);
                        csa->priv2.spuq[i].mfc_cq_data1_RW =
                            in_be64(&priv2->spuq[i].mfc_cq_data1_RW);
                        csa->priv2.spuq[i].mfc_cq_data2_RW =
                            in_be64(&priv2->spuq[i].mfc_cq_data2_RW);
                        csa->priv2.spuq[i].mfc_cq_data3_RW =
                            in_be64(&priv2->spuq[i].mfc_cq_data3_RW);
                }
        }
}

static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 20:
         *     Save the PPU_QueryMask register
         *     in the CSA.
         */
        csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW);
}

static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 21:
         *     Save the PPU_QueryType register
         *     in the CSA.
         */
        csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW);
}

static inline void save_ppu_tagstatus(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save the Prxy_TagStatus register in the CSA.
         *
         * It is unnecessary to restore dma_tagstatus_R; however,
         * dma_tagstatus_R in the CSA is accessed via backing_ops, so
         * we must save it.
         */
        csa->prob.dma_tagstatus_R = in_be32(&prob->dma_tagstatus_R);
}

static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 22:
         *     Save the MFC_CSR_TSQ register
         *     in the LSCSA.
         */
        csa->priv2.spu_tag_status_query_RW =
            in_be64(&priv2->spu_tag_status_query_RW);
}

static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 23:
         *     Save the MFC_CSR_CMD1 and MFC_CSR_CMD2
         *     registers in the CSA.
         */
        csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW);
        csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW);
}

static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 24:
         *     Save the MFC_CSR_ATO register in
         *     the CSA.
         */
        csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW);
}

static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 25:
         *     Save the MFC_TCLASS_ID register in
         *     the CSA.
         */
        csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu);
}

static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 26:
         * Restore, Step 23.
         *     Write the MFC_TCLASS_ID register with
         *     the value 0x10000000.
         */
        spu_mfc_tclass_id_set(spu, 0x10000000);
        eieio();
}

static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 27:
         * Restore, Step 14.
         *     Write MFC_CNTL[Pc]=1 (purge queue).
         */
        out_be64(&priv2->mfc_control_RW,
                        MFC_CNTL_PURGE_DMA_REQUEST |
                        MFC_CNTL_SUSPEND_MASK);
        eieio();
}

static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 28:
         *     Poll MFC_CNTL[Ps] until value '11' is read
         *     (purge complete).
         */
        POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
                         MFC_CNTL_PURGE_DMA_STATUS_MASK) ==
                         MFC_CNTL_PURGE_DMA_COMPLETE);
}

static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 30:
         * Restore, Step 18:
         *     Write MFC_SR1 with MFC_SR1[D=0,S=1] and
         *     MFC_SR1[TL,R,Pr,T] set correctly for the
         *     OS specific environment.
         *
         *     Implementation note: The SPU-side code
         *     for save/restore is privileged, so the
         *     MFC_SR1[Pr] bit is not set.
         *
         */
        spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |
                              MFC_STATE1_RELOCATE_MASK |
                              MFC_STATE1_BUS_TLBIE_MASK));
}

static inline void save_spu_npc(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 31:
         *     Save SPU_NPC in the CSA.
         */
        csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW);
}

static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 32:
         *     Save SPU_PrivCntl in the CSA.
         */
        csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW);
}

static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 33:
         * Restore, Step 16:
         *     Write SPU_PrivCntl[S,Le,A] fields reset to 0.
         */
        out_be64(&priv2->spu_privcntl_RW, 0UL);
        eieio();
}

static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 34:
         *     Save SPU_LSLR in the CSA.
         */
        csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW);
}

static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 35:
         * Restore, Step 17.
         *     Reset SPU_LSLR.
         */
        out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK);
        eieio();
}

static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 36:
         *     Save SPU_Cfg in the CSA.
         */
        csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW);
}

static inline void save_pm_trace(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 37:
         *     Save PM_Trace_Tag_Wait_Mask in the CSA.
         *     Not performed by this implementation.
         */
}

static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 38:
         *     Save the RA_GROUP_ID register and the
         *     RA_ENABLE register in the CSA.
         */
        csa->priv1.resource_allocation_groupID_RW =
                spu_resource_allocation_groupID_get(spu);
        csa->priv1.resource_allocation_enable_RW =
                spu_resource_allocation_enable_get(spu);
}

static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 39:
         *     Save MB_Stat register in the CSA.
         */
        csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R);
}

static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 40:
         *     Save the PPU_MB register in the CSA.
         */
        csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R);
}

static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 41:
         *     Save the PPUINT_MB register in the CSA.
         */
        csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R);
}

static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
        int i;

        /* Save, Step 42:
         */

        /* Save CH 1, without channel count */
        out_be64(&priv2->spu_chnlcntptr_RW, 1);
        csa->spu_chnldata_RW[1] = in_be64(&priv2->spu_chnldata_RW);

        /* Save the following CH: [0,3,4,24,25,27] */
        for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
                idx = ch_indices[i];
                out_be64(&priv2->spu_chnlcntptr_RW, idx);
                eieio();
                csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW);
                csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW);
                out_be64(&priv2->spu_chnldata_RW, 0UL);
                out_be64(&priv2->spu_chnlcnt_RW, 0UL);
                eieio();
        }
}

static inline void save_spu_mb(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        int i;

        /* Save, Step 43:
         *     Save SPU Read Mailbox Channel.
         */
        out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
        eieio();
        csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW);
        for (i = 0; i < 4; i++) {
                csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW);
        }
        out_be64(&priv2->spu_chnlcnt_RW, 0UL);
        eieio();
}

static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 44:
         *     Save MFC_CMD Channel.
         */
        out_be64(&priv2->spu_chnlcntptr_RW, 21UL);
        eieio();
        csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW);
        eieio();
}

static inline void reset_ch(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL };
        u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL };
        u64 idx;
        int i;

        /* Save, Step 45:
         *     Reset the following CH: [21, 23, 28, 30]
         */
        for (i = 0; i < 4; i++) {
                idx = ch_indices[i];
                out_be64(&priv2->spu_chnlcntptr_RW, idx);
                eieio();
                out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
                eieio();
        }
}

static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 46:
         * Restore, Step 25.
         *     Write MFC_CNTL[Sc]=0 (resume queue processing).
         */
        out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
}

static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu,
                unsigned int *code, int code_size)
{
        /* Save, Step 47:
         * Restore, Step 30.
         *     If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All
         *     register, then initialize SLB_VSID and SLB_ESID
         *     to provide access to SPU context save code and
         *     LSCSA.
         *
         *     This implementation places both the context
         *     switch code and LSCSA in kernel address space.
         *
         *     Further, this implementation assumes that
         *     MFC_SR1[R]=1 (in other words, that translation
         *     is desired by the OS environment).
         */
        spu_invalidate_slbs(spu);
        spu_setup_kernel_slbs(spu, csa->lscsa, code, code_size);
}

static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 48:
         * Restore, Step 23.
         *     Change the software context switch pending flag
         *     to context switch active.  This implementation does
         *     not use a switch active flag.
         *
         * Now that we have saved the mfc in the csa, we can add in the
         * restart command if an exception occurred.
         */
        if (test_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags))
                csa->priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
        clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
        mb();
}

static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
{
        unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
            CLASS1_ENABLE_STORAGE_FAULT_INTR;

        /* Save, Step 49:
         * Restore, Step 22:
         *     Reset and then enable interrupts, as
         *     needed by OS.
         *
         *     This implementation enables only class1
         *     (translation) interrupts.
         */
        spin_lock_irq(&spu->register_lock);
        spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
        spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
        spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
        spu_int_mask_set(spu, 0, 0ul);
        spu_int_mask_set(spu, 1, class1_mask);
        spu_int_mask_set(spu, 2, 0ul);
        spin_unlock_irq(&spu->register_lock);
}

static inline int send_mfc_dma(struct spu *spu, unsigned long ea,
                               unsigned int ls_offset, unsigned int size,
                               unsigned int tag, unsigned int rclass,
                               unsigned int cmd)
{
        struct spu_problem __iomem *prob = spu->problem;
        union mfc_tag_size_class_cmd command;
        unsigned int transfer_size;
        volatile unsigned int status = 0x0;

        /* Split the transfer into MFC_MAX_DMA_SIZE chunks and enqueue
         * each chunk through the problem-state MFC command registers.
         */
        while (size > 0) {
                transfer_size =
                    (size > MFC_MAX_DMA_SIZE) ? MFC_MAX_DMA_SIZE : size;
                command.u.mfc_size = transfer_size;
                command.u.mfc_tag = tag;
                command.u.mfc_rclassid = rclass;
                command.u.mfc_cmd = cmd;
                do {
                        out_be32(&prob->mfc_lsa_W, ls_offset);
                        out_be64(&prob->mfc_ea_W, ea);
                        out_be64(&prob->mfc_union_W.all64, command.all64);
                        status =
                            in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
                        /* Re-issue the command while the returned status
                         * reports failure in its low two bits.
                         */
                        if (unlikely(status & 0x2)) {
                                cpu_relax();
                        }
                } while (status & 0x3);
                size -= transfer_size;
                ea += transfer_size;
                ls_offset += transfer_size;
        }
        return 0;
}

static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu)
{
        unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
        unsigned int ls_offset = 0x0;
        unsigned int size = 16384;
        unsigned int tag = 0;
        unsigned int rclass = 0;
        unsigned int cmd = MFC_PUT_CMD;

        /* Save, Step 50:
         *     Issue a DMA command to copy the first 16K bytes
         *     of local storage to the CSA.
         */
        send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_spu_npc(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 51:
         * Restore, Step 31.
         *     Write SPU_NPC[IE]=0 and SPU_NPC[LSA] to entry
         *     point address of context save code in local
         *     storage.
         *
         *     This implementation uses SPU-side save/restore
         *     programs with entry points at LSA of 0.
         */
        out_be32(&prob->spu_npc_RW, 0);
        eieio();
}

static inline void set_signot1(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        union {
                u64 ull;
                u32 ui[2];
        } addr64;

        /* Save, Step 52:
         * Restore, Step 32:
         *    Write SPU_Sig_Notify_1 register with upper 32-bits
         *    of the CSA.LSCSA effective address.
         */
        addr64.ull = (u64) csa->lscsa;
        out_be32(&prob->signal_notify1, addr64.ui[0]);
        eieio();
}

static inline void set_signot2(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        union {
                u64 ull;
                u32 ui[2];
        } addr64;

        /* Save, Step 53:
         * Restore, Step 33:
         *    Write SPU_Sig_Notify_2 register with lower 32-bits
         *    of the CSA.LSCSA effective address.
         */
        addr64.ull = (u64) csa->lscsa;
        out_be32(&prob->signal_notify2, addr64.ui[1]);
        eieio();
}

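/*
 * Note: set_signot1() and set_signot2() together hand the 64-bit
 * effective address of the LSCSA to the SPU-side code, which can
 * reassemble it from the two 32-bit halves, e.g. (illustrative):
 *
 *      u64 lscsa_ea = ((u64)signal_notify1 << 32) | signal_notify2;
 */
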
static inline void send_save_code(struct spu_state *csa, struct spu *spu)
{
        unsigned long addr = (unsigned long)&spu_save_code[0];
        unsigned int ls_offset = 0x0;
        unsigned int size = sizeof(spu_save_code);
        unsigned int tag = 0;
        unsigned int rclass = 0;
        unsigned int cmd = MFC_GETFS_CMD;

        /* Save, Step 54:
         *     Issue a DMA command to copy context save code
         *     to local storage and start SPU.
         */
        send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 55:
         * Restore, Step 38.
         *     Write PPU_QueryMask=1 (enable Tag Group 0)
         *     and issue eieio instruction.
         */
        out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0));
        eieio();
}

static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        u32 mask = MFC_TAGID_TO_TAGMASK(0);
        unsigned long flags;

        /* Save, Step 56:
         * Restore, Step 39.
         * Restore, Step 46.
         *     Poll PPU_TagStatus[gn] until 01 (Tag group 0 complete)
         *     or write PPU_QueryType[TS]=01 and wait for Tag Group
         *     Complete Interrupt.  Write INT_Stat_Class0 or
         *     INT_Stat_Class2 with value of 'handled'.
         */
        POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);

        local_irq_save(flags);
        spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
        spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
        local_irq_restore(flags);
}

static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        unsigned long flags;

        /* Save, Step 57:
         * Restore, Step 40.
         *     Poll until SPU_Status[R]=0 or wait for SPU Class 0
         *     or SPU Class 2 interrupt.  Write INT_Stat_class0
         *     or INT_Stat_class2 with value of handled.
         */
        POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);

        local_irq_save(flags);
        spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
        spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
        local_irq_restore(flags);
}

static inline int check_save_status(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        u32 complete;

        /* Save, Step 54:
         *     If SPU_Status[P]=1 and SPU_Status[SC] = "success",
         *     context save succeeded, otherwise context save
         *     failed.
         */
        complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
                    SPU_STATUS_STOPPED_BY_STOP);
        return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}

static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 4:
         *    If required, notify the "using application" that
         *    the SPU task has been terminated.  TBD.
         */
}

static inline void suspend_mfc_and_halt_decr(struct spu_state *csa,
                struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 7:
         *     Write MFC_Cntl[Dh,Sc,Sm]='1','1','0' to suspend
         *     the queue and halt the decrementer.
         */
        out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE |
                 MFC_CNTL_DECREMENTER_HALTED);
        eieio();
}

static inline void wait_suspend_mfc_complete(struct spu_state *csa,
                                             struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 8:
         * Restore, Step 47.
         *     Poll MFC_CNTL[Ss] until 11 is returned.
         */
        POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
                         MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
                         MFC_CNTL_SUSPEND_COMPLETE);
}

static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Restore, Step 9:
         *    If SPU_Status[R]=1, stop SPU execution
         *    and wait for stop to complete.
         *
         *    Returns       1 if SPU_Status[R]=1 on entry.
         *                  0 otherwise
         */
        if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {
                if (in_be32(&prob->spu_status_R) &
                    SPU_STATUS_ISOLATED_EXIT_STATUS) {
                        POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                        SPU_STATUS_RUNNING);
                }
                if ((in_be32(&prob->spu_status_R) &
                     SPU_STATUS_ISOLATED_LOAD_STATUS)
                    || (in_be32(&prob->spu_status_R) &
                        SPU_STATUS_ISOLATED_STATE)) {
                        out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
                        eieio();
                        POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                        SPU_STATUS_RUNNING);
                        out_be32(&prob->spu_runcntl_RW, 0x2);
                        eieio();
                        POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                        SPU_STATUS_RUNNING);
                }
                if (in_be32(&prob->spu_status_R) &
                    SPU_STATUS_WAITING_FOR_CHANNEL) {
                        out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
                        eieio();
                        POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                        SPU_STATUS_RUNNING);
                }
                return 1;
        }
        return 0;
}

static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Restore, Step 10:
         *    If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1,
         *    release SPU from isolate state.
         */
        if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
                if (in_be32(&prob->spu_status_R) &
                    SPU_STATUS_ISOLATED_EXIT_STATUS) {
                        spu_mfc_sr1_set(spu,
                                        MFC_STATE1_MASTER_RUN_CONTROL_MASK);
                        eieio();
                        out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
                        eieio();
                        POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                        SPU_STATUS_RUNNING);
                }
                if ((in_be32(&prob->spu_status_R) &
                     SPU_STATUS_ISOLATED_LOAD_STATUS)
                    || (in_be32(&prob->spu_status_R) &
                        SPU_STATUS_ISOLATED_STATE)) {
                        spu_mfc_sr1_set(spu,
                                        MFC_STATE1_MASTER_RUN_CONTROL_MASK);
                        eieio();
                        out_be32(&prob->spu_runcntl_RW, 0x2);
                        eieio();
                        POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                        SPU_STATUS_RUNNING);
                }
        }
}

static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
        u64 idx;
        int i;

        /* Restore, Step 20:
         */

        /* Reset CH 1 */
        out_be64(&priv2->spu_chnlcntptr_RW, 1);
        out_be64(&priv2->spu_chnldata_RW, 0UL);

        /* Reset the following CH: [0,3,4,24,25,27] */
        for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
                idx = ch_indices[i];
                out_be64(&priv2->spu_chnlcntptr_RW, idx);
                eieio();
                out_be64(&priv2->spu_chnldata_RW, 0UL);
                out_be64(&priv2->spu_chnlcnt_RW, 0UL);
                eieio();
        }
}

static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL };
        u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL };
        u64 idx;
        int i;

        /* Restore, Step 21:
         *     Reset the following CH: [21, 23, 28, 29, 30]
         */
        for (i = 0; i < 5; i++) {
                idx = ch_indices[i];
                out_be64(&priv2->spu_chnlcntptr_RW, idx);
                eieio();
                out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
                eieio();
        }
}

static inline void setup_spu_status_part1(struct spu_state *csa,
                                          struct spu *spu)
{
        u32 status_P = SPU_STATUS_STOPPED_BY_STOP;
        u32 status_I = SPU_STATUS_INVALID_INSTR;
        u32 status_H = SPU_STATUS_STOPPED_BY_HALT;
        u32 status_S = SPU_STATUS_SINGLE_STEP;
        u32 status_S_I = SPU_STATUS_SINGLE_STEP | SPU_STATUS_INVALID_INSTR;
        u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP;
        u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
        u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_INVALID_INSTR;
        u32 status_code;

        /* Restore, Step 27:
         *     If the CSA.SPU_Status[I,S,H,P]=1 then add the correct
         *     instruction sequence to the end of the SPU based restore
         *     code (after the "context restored" stop and signal) to
         *     restore the correct SPU status.
         *
         *     NOTE: Rather than modifying the SPU executable, we
         *     instead add a new 'stopped_status' field to the
         *     LSCSA.  The SPU-side restore reads this field and
         *     takes the appropriate action when exiting.
         */

        status_code =
            (csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF;
        if ((csa->prob.spu_status_R & status_P_I) == status_P_I) {

                /* SPU_Status[P,I]=1 - Illegal Instruction followed
                 * by Stop and Signal instruction, followed by 'br -4'.
                 *
                 */
                csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I;
                csa->lscsa->stopped_status.slot[1] = status_code;

        } else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) {

                /* SPU_Status[P,H]=1 - Halt Conditional, followed
                 * by Stop and Signal instruction, followed by
                 * 'br -4'.
                 */
                csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H;
                csa->lscsa->stopped_status.slot[1] = status_code;

        } else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) {

                /* SPU_Status[S,P]=1 - Stop and Signal instruction
                 * followed by 'br -4'.
                 */
                csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P;
                csa->lscsa->stopped_status.slot[1] = status_code;

        } else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) {

                /* SPU_Status[S,I]=1 - Illegal instruction followed
                 * by 'br -4'.
                 */
                csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I;
                csa->lscsa->stopped_status.slot[1] = status_code;

        } else if ((csa->prob.spu_status_R & status_P) == status_P) {

                /* SPU_Status[P]=1 - Stop and Signal instruction
                 * followed by 'br -4'.
                 */
                csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P;
                csa->lscsa->stopped_status.slot[1] = status_code;

        } else if ((csa->prob.spu_status_R & status_H) == status_H) {

                /* SPU_Status[H]=1 - Halt Conditional, followed
                 * by 'br -4'.
                 */
                csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H;

        } else if ((csa->prob.spu_status_R & status_S) == status_S) {

                /* SPU_Status[S]=1 - Two nop instructions.
                 */
                csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S;

        } else if ((csa->prob.spu_status_R & status_I) == status_I) {

                /* SPU_Status[I]=1 - Illegal instruction followed
                 * by 'br -4'.
                 */
                csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I;

        }
}

static inline void setup_spu_status_part2(struct spu_state *csa,
                                          struct spu *spu)
{
        u32 mask;

        /* Restore, Step 28:
         *     If the CSA.SPU_Status[I,S,H,P,R]=0 then
         *     add a 'br *' instruction to the end of
         *     the SPU based restore code.
         *
         *     NOTE: Rather than modifying the SPU executable, we
         *     instead add a new 'stopped_status' field to the
         *     LSCSA.  The SPU-side restore reads this field and
         *     takes the appropriate action when exiting.
         */
        mask = SPU_STATUS_INVALID_INSTR |
            SPU_STATUS_SINGLE_STEP |
            SPU_STATUS_STOPPED_BY_HALT |
            SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
        if (!(csa->prob.spu_status_R & mask)) {
                csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R;
        }
}

static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 29:
         *     Restore the RA_GROUP_ID register and the
         *     RA_ENABLE register from the CSA.
         */
        spu_resource_allocation_groupID_set(spu,
                        csa->priv1.resource_allocation_groupID_RW);
        spu_resource_allocation_enable_set(spu,
                        csa->priv1.resource_allocation_enable_RW);
}

static inline void send_restore_code(struct spu_state *csa, struct spu *spu)
{
        unsigned long addr = (unsigned long)&spu_restore_code[0];
        unsigned int ls_offset = 0x0;
        unsigned int size = sizeof(spu_restore_code);
        unsigned int tag = 0;
        unsigned int rclass = 0;
        unsigned int cmd = MFC_GETFS_CMD;

        /* Restore, Step 37:
         *     Issue MFC DMA command to copy context
         *     restore code to local storage.
         */
        send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void setup_decr(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 34:
         *     If CSA.MFC_CNTL[Ds]=1 (decrementer was
         *     running) then adjust decrementer, set
         *     decrementer running status in LSCSA,
         *     and set decrementer "wrapped" status
         *     in LSCSA.
         */
        if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) {
                cycles_t resume_time = get_cycles();
                cycles_t delta_time = resume_time - csa->suspend_time;

                csa->lscsa->decr_status.slot[0] = SPU_DECR_STATUS_RUNNING;
                if (csa->lscsa->decr.slot[0] < delta_time) {
                        csa->lscsa->decr_status.slot[0] |=
                                 SPU_DECR_STATUS_WRAPPED;
                }

                csa->lscsa->decr.slot[0] -= delta_time;
        } else {
                csa->lscsa->decr_status.slot[0] = 0;
        }
}

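/*
 * Worked example for setup_decr() above (illustrative numbers): if the
 * saved decrementer held 1000 ticks but 1500 timebase cycles elapsed
 * between suspend and resume, then 1000 < 1500 and the subtraction
 * underflows, so SPU_DECR_STATUS_WRAPPED is set to tell the SPU-side
 * restore code that the decrementer ran past zero while the context
 * was switched out.
 */
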
static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 35:
         *     Copy the CSA.PU_MB data into the LSCSA.
         */
        csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R;
}

static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 36:
         *     Copy the CSA.PUINT_MB data into the LSCSA.
         */
        csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R;
}

static inline int check_restore_status(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        u32 complete;

        /* Restore, Step 40:
         *     If SPU_Status[P]=1 and SPU_Status[SC] = "success",
         *     context restore succeeded, otherwise context restore
         *     failed.
         */
        complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
                    SPU_STATUS_STOPPED_BY_STOP);
        return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}

static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 41:
         *     Restore SPU_PrivCntl from the CSA.
         */
        out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW);
        eieio();
}

static inline void restore_status_part1(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        u32 mask;

        /* Restore, Step 42:
         *     If any CSA.SPU_Status[I,S,H,P]=1, then
         *     restore the error or single step state.
         */
        mask = SPU_STATUS_INVALID_INSTR |
            SPU_STATUS_SINGLE_STEP |
            SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
        if (csa->prob.spu_status_R & mask) {
                out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
                eieio();
                POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                SPU_STATUS_RUNNING);
        }
}

static inline void restore_status_part2(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        u32 mask;

        /* Restore, Step 43:
         *     If all CSA.SPU_Status[I,S,H,P,R]=0 then write
         *     SPU_RunCntl[R0R1]='01', wait for SPU_Status[R]=1,
         *     then write '00' to SPU_RunCntl[R0R1] and wait
         *     for SPU_Status[R]=0.
         */
        mask = SPU_STATUS_INVALID_INSTR |
            SPU_STATUS_SINGLE_STEP |
            SPU_STATUS_STOPPED_BY_HALT |
            SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
        if (!(csa->prob.spu_status_R & mask)) {
                out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
                eieio();
                POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) &
                                 SPU_STATUS_RUNNING);
                out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
                eieio();
                POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                SPU_STATUS_RUNNING);
        }
}

static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu)
{
        unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
        unsigned int ls_offset = 0x0;
        unsigned int size = 16384;
        unsigned int tag = 0;
        unsigned int rclass = 0;
        unsigned int cmd = MFC_GET_CMD;

        /* Restore, Step 44:
         *     Issue a DMA command to restore the first
         *     16kb of local storage from CSA.
         */
        send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void suspend_mfc(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 47.
         *     Write MFC_Cntl[Sc,Sm]='1','0' to suspend
         *     the queue.
         */
        out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
        eieio();
}

static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 49:
         *     Write INT_MASK_class0 with value of 0.
         *     Write INT_MASK_class1 with value of 0.
         *     Write INT_MASK_class2 with value of 0.
         *     Write INT_STAT_class0 with value of -1.
         *     Write INT_STAT_class1 with value of -1.
         *     Write INT_STAT_class2 with value of -1.
         */
        spin_lock_irq(&spu->register_lock);
        spu_int_mask_set(spu, 0, 0ul);
        spu_int_mask_set(spu, 1, 0ul);
        spu_int_mask_set(spu, 2, 0ul);
        spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
        spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
        spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
        spin_unlock_irq(&spu->register_lock);
}

static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        int i;

        /* Restore, Step 50:
         *     If MFC_Cntl[Se]=0 then restore
         *     MFC command queues.
         */
        if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) {
                for (i = 0; i < 8; i++) {
                        out_be64(&priv2->puq[i].mfc_cq_data0_RW,
                                 csa->priv2.puq[i].mfc_cq_data0_RW);
                        out_be64(&priv2->puq[i].mfc_cq_data1_RW,
                                 csa->priv2.puq[i].mfc_cq_data1_RW);
                        out_be64(&priv2->puq[i].mfc_cq_data2_RW,
                                 csa->priv2.puq[i].mfc_cq_data2_RW);
                        out_be64(&priv2->puq[i].mfc_cq_data3_RW,
                                 csa->priv2.puq[i].mfc_cq_data3_RW);
                }
                for (i = 0; i < 16; i++) {
                        out_be64(&priv2->spuq[i].mfc_cq_data0_RW,
                                 csa->priv2.spuq[i].mfc_cq_data0_RW);
                        out_be64(&priv2->spuq[i].mfc_cq_data1_RW,
                                 csa->priv2.spuq[i].mfc_cq_data1_RW);
                        out_be64(&priv2->spuq[i].mfc_cq_data2_RW,
                                 csa->priv2.spuq[i].mfc_cq_data2_RW);
                        out_be64(&priv2->spuq[i].mfc_cq_data3_RW,
                                 csa->priv2.spuq[i].mfc_cq_data3_RW);
                }
        }
        eieio();
}

1464static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu)
1465{
1466        struct spu_problem __iomem *prob = spu->problem;
1467
1468        /* Restore, Step 51:
1469         *     Restore the PPU_QueryMask register from CSA.
1470         */
1471        out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW);
1472        eieio();
1473}
1474
1475static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu)
1476{
1477        struct spu_problem __iomem *prob = spu->problem;
1478
1479        /* Restore, Step 52:
1480         *     Restore the PPU_QueryType register from CSA.
1481         */
1482        out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW);
1483        eieio();
1484}
1485
1486static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
1487{
1488        struct spu_priv2 __iomem *priv2 = spu->priv2;
1489
1490        /* Restore, Step 53:
1491         *     Restore the MFC_CSR_TSQ register from CSA.
1492         */
1493        out_be64(&priv2->spu_tag_status_query_RW,
1494                 csa->priv2.spu_tag_status_query_RW);
1495        eieio();
1496}
1497
1498static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
1499{
1500        struct spu_priv2 __iomem *priv2 = spu->priv2;
1501
1502        /* Restore, Step 54:
1503         *     Restore the MFC_CSR_CMD1 and MFC_CSR_CMD2
1504         *     registers from CSA.
1505         */
1506        out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW);
1507        out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW);
1508        eieio();
1509}
1510
1511static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
1512{
1513        struct spu_priv2 __iomem *priv2 = spu->priv2;
1514
1515        /* Restore, Step 55:
1516         *     Restore the MFC_CSR_ATO register from CSA.
1517         */
1518        out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW);
1519}
1520
1521static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
1522{
1523        /* Restore, Step 56:
1524         *     Restore the MFC_TCLASS_ID register from CSA.
1525         */
1526        spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW);
1527        eieio();
1528}
1529
1530static inline void set_llr_event(struct spu_state *csa, struct spu *spu)
1531{
1532        u64 ch0_cnt, ch0_data;
1533        u64 ch1_data;
1534
1535        /* Restore, Step 57:
1536         *    Set the Lock Line Reservation Lost Event by:
1537         *      1. OR CSA.SPU_Event_Status with bit 21 (Lr) set to 1.
1538         *      2. If CSA.SPU_Channel_0_Count=0 and
1539         *         CSA.SPU_Wr_Event_Mask[Lr]=1 and
1540         *         CSA.SPU_Event_Status[Lr]=0 then set
1541         *         CSA.SPU_Event_Status_Count=1.
1542         */
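            /* Channel 0 is SPU_RdEventStat (pending events), channel 1
             * is SPU_WrEventMask.  The count is raised only if Lr is
             * unmasked and was not already pending.
             */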
1543        ch0_cnt = csa->spu_chnlcnt_RW[0];
1544        ch0_data = csa->spu_chnldata_RW[0];
1545        ch1_data = csa->spu_chnldata_RW[1];
1546        csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT;
1547        if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) &&
1548            (ch1_data & MFC_LLR_LOST_EVENT)) {
1549                csa->spu_chnlcnt_RW[0] = 1;
1550        }
1551}
1552
1553static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)
1554{
1555        /* Restore, Step 58:
1556         *     If the status of the CSA software decrementer
1557         *     "wrapped" flag is set, OR in a '1' to
1558         *     CSA.SPU_Event_Status[Tm].
1559         */
1560        if (!(csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED))
1561                return;
1562
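            /* 0x20 is the Tm (decrementer) bit in SPU_Event_Status
             * and SPU_Event_Mask.
             */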
1563        if ((csa->spu_chnlcnt_RW[0] == 0) &&
1564            (csa->spu_chnldata_RW[1] & 0x20) &&
1565            !(csa->spu_chnldata_RW[0] & 0x20))
1566                csa->spu_chnlcnt_RW[0] = 1;
1567
1568        csa->spu_chnldata_RW[0] |= 0x20;
1569}
1570
1571static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu)
1572{
1573        struct spu_priv2 __iomem *priv2 = spu->priv2;
1574        u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
1575        int i;
1576
1577        /* Restore, Step 59:
1578         *      Restore the following CH: [0,3,4,24,25,27]
1579         */
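            /* These are the read channels saved in the CSA:
             * SPU_RdEventStat, SPU_RdSigNotify1/2, MFC_RdTagStat,
             * MFC_RdListStallStat and MFC_RdAtomicStat.
             */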
1580        for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
1581                idx = ch_indices[i];
1582                out_be64(&priv2->spu_chnlcntptr_RW, idx);
1583                eieio();
1584                out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]);
1585                out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]);
1586                eieio();
1587        }
1588}
1589
1590static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu)
1591{
1592        struct spu_priv2 __iomem *priv2 = spu->priv2;
1593        u64 ch_indices[3] = { 9UL, 21UL, 23UL };
1594        u64 ch_counts[3];
1595        u64 idx;
1596        int i;
1597
1598        /* Restore, Step 60:
1599         *     Restore the following CH: [9,21,23].
1600         */
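            /* Channels 9 (MFC_WrMSSyncReq) and 23 (MFC_WrTagUpdate)
             * restart writable with a count of 1; channel 21 (MFC_Cmd)
             * gets back its saved count of free command-queue slots.
             */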
1601        ch_counts[0] = 1UL;
1602        ch_counts[1] = csa->spu_chnlcnt_RW[21];
1603        ch_counts[2] = 1UL;
1604        for (i = 0; i < 3; i++) {
1605                idx = ch_indices[i];
1606                out_be64(&priv2->spu_chnlcntptr_RW, idx);
1607                eieio();
1608                out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
1609                eieio();
1610        }
1611}
1612
1613static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu)
1614{
1615        struct spu_priv2 __iomem *priv2 = spu->priv2;
1616
1617        /* Restore, Step 61:
1618         *     Restore the SPU_LSLR register from CSA.
1619         */
1620        out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW);
1621        eieio();
1622}
1623
1624static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu)
1625{
1626        struct spu_priv2 __iomem *priv2 = spu->priv2;
1627
1628        /* Restore, Step 62:
1629         *     Restore the SPU_Cfg register from CSA.
1630         */
1631        out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW);
1632        eieio();
1633}
1634
1635static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu)
1636{
1637        /* Restore, Step 63:
1638         *     Restore PM_Trace_Tag_Wait_Mask from CSA.
1639         *     Not performed by this implementation.
1640         */
1641}
1642
1643static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu)
1644{
1645        struct spu_problem __iomem *prob = spu->problem;
1646
1647        /* Restore, Step 64:
1648         *     Restore SPU_NPC from CSA.
1649         */
1650        out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW);
1651        eieio();
1652}
1653
1654static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu)
1655{
1656        struct spu_priv2 __iomem *priv2 = spu->priv2;
1657        int i;
1658
1659        /* Restore, Step 65:
1660         *     Restore MFC_RdSPU_MB from CSA.
1661         */
1662        out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
1663        eieio();
1664        out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]);
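            /* The inbound mailbox channel (29) is four entries deep;
             * replay all four saved slots.
             */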
1665        for (i = 0; i < 4; i++) {
1666                out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]);
1667        }
1668        eieio();
1669}
1670
1671static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
1672{
1673        struct spu_problem __iomem *prob = spu->problem;
1674        u32 dummy = 0;
1675
1676        /* Restore, Step 66:
1677         *     If CSA.MB_Stat[P]=0 (mailbox empty) then
1678         *     read from the PPU_MB register.
1679         */
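            /* The dummy read drains any stale entry so the hardware
             * mailbox matches the (empty) saved state.
             */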
1680        if ((csa->prob.mb_stat_R & 0xFF) == 0) {
1681                dummy = in_be32(&prob->pu_mb_R);
1682                eieio();
1683        }
1684}
1685
1686static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
1687{
1688        struct spu_priv2 __iomem *priv2 = spu->priv2;
1689        u64 dummy = 0UL;
1690
1691        /* Restore, Step 67:
1692         *     If CSA.MB_Stat[I]=0 (mailbox empty) then
1693         *     read from the PPUINT_MB register.
1694         */
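            /* Likewise drain the interrupting mailbox, and clear any
             * latched class 2 mailbox interrupt.
             */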
1695        if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
1696                dummy = in_be64(&priv2->puint_mb_R);
1697                eieio();
1698                spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
1699                eieio();
1700        }
1701}
1702
1703static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
1704{
1705        /* Restore, Step 69:
1706         *     Restore the MFC_SR1 register from CSA.
1707         */
1708        spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW);
1709        eieio();
1710}
1711
1712static inline void set_int_route(struct spu_state *csa, struct spu *spu)
1713{
1714        struct spu_context *ctx = spu->ctx;
1715
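            /* NEW step: route this SPU's interrupts to the CPU that
             * last ran the context.
             */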
1716        spu_cpu_affinity_set(spu, ctx->last_ran);
1717}
1718
1719static inline void restore_other_spu_access(struct spu_state *csa,
1720                                            struct spu *spu)
1721{
1722        /* Restore, Step 70:
1723         *     Restore other SPU mappings to this SPU. TBD.
1724         */
1725}
1726
1727static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu)
1728{
1729        struct spu_problem __iomem *prob = spu->problem;
1730
1731        /* Restore, Step 71:
1732         *     If CSA.SPU_Status[R]=1 then write
1733         *     SPU_RunCntl[R0R1]='01'.
1734         */
1735        if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) {
1736                out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
1737                eieio();
1738        }
1739}
1740
1741static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
1742{
1743        struct spu_priv2 __iomem *priv2 = spu->priv2;
1744
1745        /* Restore, Step 72:
1746         *    Restore the MFC_CNTL register from the CSA.
1747         */
1748        out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
1749        eieio();
1750
1751        /*
1752         * The queue is put back into the same operational state it was
1753         * in prior to the context switch. If that state was suspending or
1754         * suspended, the suspend flag was added to the saved state in the
1755         * CSA, and the code that suspended the MFC is responsible for
1756         * resuming it. Note that SPE faults do not change the operational
1757         * state of the SPU.
1758         */
1759}
1760
1761static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
1762{
1763        /* Restore, Step 73:
1764         *     Enable user-space access (if provided) to this
1765         *     SPU by mapping the virtual pages assigned to
1766         *     the SPU memory-mapped I/O (MMIO) for problem
1767         *     state. TBD.
1768         */
1769}
1770
1771static inline void reset_switch_active(struct spu_state *csa, struct spu *spu)
1772{
1773        /* Restore, Step 74:
1774         *     Reset the "context switch active" flag.
1775         *     Not performed by this implementation.
1776         */
1777}
1778
1779static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu)
1780{
1781        /* Restore, Step 75:
1782         *     Re-enable SPU interrupts.
1783         */
1784        spin_lock_irq(&spu->register_lock);
1785        spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW);
1786        spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW);
1787        spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW);
1788        spin_unlock_irq(&spu->register_lock);
1789}
1790
1791static int quiesce_spu(struct spu_state *prev, struct spu *spu)
1792{
1793        /*
1794         * Combine steps 2-18 of SPU context save sequence, which
1795         * quiesce the SPU state (disable SPU execution, MFC command
1796         * queues, decrementer, SPU interrupts, etc.).
1797         *
1798         * Returns      0 on success.
1799         *              2 if failed step 2.
1800         *              6 if failed step 6.
1801         */
1802
1803        if (check_spu_isolate(prev, spu)) {     /* Step 2. */
1804                return 2;
1805        }
1806        disable_interrupts(prev, spu);          /* Step 3. */
1807        set_watchdog_timer(prev, spu);          /* Step 4. */
1808        inhibit_user_access(prev, spu);         /* Step 5. */
1809        if (check_spu_isolate(prev, spu)) {     /* Step 6. */
1810                return 6;
1811        }
1812        set_switch_pending(prev, spu);          /* Step 7. */
1813        save_mfc_cntl(prev, spu);               /* Step 8. */
1814        save_spu_runcntl(prev, spu);            /* Step 9. */
1815        save_mfc_sr1(prev, spu);                /* Step 10. */
1816        save_spu_status(prev, spu);             /* Step 11. */
1817        save_mfc_stopped_status(prev, spu);     /* Step 12. */
1818        halt_mfc_decr(prev, spu);               /* Step 13. */
1819        save_timebase(prev, spu);               /* Step 14. */
1820        remove_other_spu_access(prev, spu);     /* Step 15. */
1821        do_mfc_mssync(prev, spu);               /* Step 16. */
1822        issue_mfc_tlbie(prev, spu);             /* Step 17. */
1823        handle_pending_interrupts(prev, spu);   /* Step 18. */
1824
1825        return 0;
1826}
1827
1828static void save_csa(struct spu_state *prev, struct spu *spu)
1829{
1830        /*
1831         * Combine steps 19-45 of SPU context save sequence, which
1832         * save regions of the privileged & problem state areas.
1833         */
1834
1835        save_mfc_queues(prev, spu);     /* Step 19. */
1836        save_ppu_querymask(prev, spu);  /* Step 20. */
1837        save_ppu_querytype(prev, spu);  /* Step 21. */
1838        save_ppu_tagstatus(prev, spu);  /* NEW.     */
1839        save_mfc_csr_tsq(prev, spu);    /* Step 22. */
1840        save_mfc_csr_cmd(prev, spu);    /* Step 23. */
1841        save_mfc_csr_ato(prev, spu);    /* Step 24. */
1842        save_mfc_tclass_id(prev, spu);  /* Step 25. */
1843        set_mfc_tclass_id(prev, spu);   /* Step 26. */
1844        save_mfc_cmd(prev, spu);        /* Step 26a - moved from 44. */
1845        purge_mfc_queue(prev, spu);     /* Step 27. */
1846        wait_purge_complete(prev, spu); /* Step 28. */
1847        setup_mfc_sr1(prev, spu);       /* Step 30. */
1848        save_spu_npc(prev, spu);        /* Step 31. */
1849        save_spu_privcntl(prev, spu);   /* Step 32. */
1850        reset_spu_privcntl(prev, spu);  /* Step 33. */
1851        save_spu_lslr(prev, spu);       /* Step 34. */
1852        reset_spu_lslr(prev, spu);      /* Step 35. */
1853        save_spu_cfg(prev, spu);        /* Step 36. */
1854        save_pm_trace(prev, spu);       /* Step 37. */
1855        save_mfc_rag(prev, spu);        /* Step 38. */
1856        save_ppu_mb_stat(prev, spu);    /* Step 39. */
1857        save_ppu_mb(prev, spu);         /* Step 40. */
1858        save_ppuint_mb(prev, spu);      /* Step 41. */
1859        save_ch_part1(prev, spu);       /* Step 42. */
1860        save_spu_mb(prev, spu);         /* Step 43. */
1861        reset_ch(prev, spu);            /* Step 45. */
1862}
1863
1864static void save_lscsa(struct spu_state *prev, struct spu *spu)
1865{
1866        /*
1867         * Perform steps 46-57 of SPU context save sequence,
1868         * which save regions of the local store and register
1869         * file.
1870         */
1871
1872        resume_mfc_queue(prev, spu);    /* Step 46. */
1873        /* Step 47. */
1874        setup_mfc_slbs(prev, spu, spu_save_code, sizeof(spu_save_code));
1875        set_switch_active(prev, spu);   /* Step 48. */
1876        enable_interrupts(prev, spu);   /* Step 49. */
1877        save_ls_16kb(prev, spu);        /* Step 50. */
1878        set_spu_npc(prev, spu);         /* Step 51. */
1879        set_signot1(prev, spu);         /* Step 52. */
1880        set_signot2(prev, spu);         /* Step 53. */
1881        send_save_code(prev, spu);      /* Step 54. */
1882        set_ppu_querymask(prev, spu);   /* Step 55. */
1883        wait_tag_complete(prev, spu);   /* Step 56. */
1884        wait_spu_stopped(prev, spu);    /* Step 57. */
1885}
1886
1887static void force_spu_isolate_exit(struct spu *spu)
1888{
1889        struct spu_problem __iomem *prob = spu->problem;
1890        struct spu_priv2 __iomem *priv2 = spu->priv2;
1891
1892        /* Stop SPE execution and wait for completion. */
1893        out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
1894        iobarrier_rw();
1895        POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
1896
1897        /* Restart SPE master runcntl. */
1898        spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK);
1899        iobarrier_w();
1900
1901        /* Initiate isolate exit request and wait for completion. */
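            /* The raw 4LL selects the isolate-exit load request in
             * SPU_PrivCntl (reset to the 'normal' request below); the
             * SPU_RunCntl write of 2 then starts that load.
             */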
1902        out_be64(&priv2->spu_privcntl_RW, 4LL);
1903        iobarrier_w();
1904        out_be32(&prob->spu_runcntl_RW, 2);
1905        iobarrier_rw();
1906        POLL_WHILE_FALSE((in_be32(&prob->spu_status_R)
1907                                & SPU_STATUS_STOPPED_BY_STOP));
1908
1909        /* Reset load request to normal. */
1910        out_be64(&priv2->spu_privcntl_RW, SPU_PRIVCNT_LOAD_REQUEST_NORMAL);
1911        iobarrier_w();
1912}
1913
1914/**
1915 * stop_spu_isolate
1916 *      Check SPU run-control state and force isolated
1917 *      exit function as necessary.
1918 */
1919static void stop_spu_isolate(struct spu *spu)
1920{
1921        struct spu_problem __iomem *prob = spu->problem;
1922
1923        if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE) {
1924                /* The SPU is in isolated state; the only way
1925                 * to get it out is to perform an isolated
1926                 * exit (clean) operation.
1927                 */
1928                force_spu_isolate_exit(spu);
1929        }
1930}
1931
1932static void harvest(struct spu_state *prev, struct spu *spu)
1933{
1934        /*
1935         * Perform steps 2-25 of SPU context restore sequence,
1936         * which reset an SPU either after a failed save, or
1937         * when using an SPU for the first time.
1938         */
1939
1940        disable_interrupts(prev, spu);          /* Step 2.  */
1941        inhibit_user_access(prev, spu);         /* Step 3.  */
1942        terminate_spu_app(prev, spu);           /* Step 4.  */
1943        set_switch_pending(prev, spu);          /* Step 5.  */
1944        stop_spu_isolate(spu);                  /* NEW.     */
1945        remove_other_spu_access(prev, spu);     /* Step 6.  */
1946        suspend_mfc_and_halt_decr(prev, spu);   /* Step 7.  */
1947        wait_suspend_mfc_complete(prev, spu);   /* Step 8.  */
1948        if (!suspend_spe(prev, spu))            /* Step 9.  */
1949                clear_spu_status(prev, spu);    /* Step 10. */
1950        do_mfc_mssync(prev, spu);               /* Step 11. */
1951        issue_mfc_tlbie(prev, spu);             /* Step 12. */
1952        handle_pending_interrupts(prev, spu);   /* Step 13. */
1953        purge_mfc_queue(prev, spu);             /* Step 14. */
1954        wait_purge_complete(prev, spu);         /* Step 15. */
1955        reset_spu_privcntl(prev, spu);          /* Step 16. */
1956        reset_spu_lslr(prev, spu);              /* Step 17. */
1957        setup_mfc_sr1(prev, spu);               /* Step 18. */
1958        spu_invalidate_slbs(spu);               /* Step 19. */
1959        reset_ch_part1(prev, spu);              /* Step 20. */
1960        reset_ch_part2(prev, spu);              /* Step 21. */
1961        enable_interrupts(prev, spu);           /* Step 22. */
1962        set_switch_active(prev, spu);           /* Step 23. */
1963        set_mfc_tclass_id(prev, spu);           /* Step 24. */
1964        resume_mfc_queue(prev, spu);            /* Step 25. */
1965}
1966
1967static void restore_lscsa(struct spu_state *next, struct spu *spu)
1968{
1969        /*
1970         * Perform steps 26-40 of SPU context restore sequence,
1971         * which restores regions of the local store and register
1972         * file.
1973         */
1974
1975        set_watchdog_timer(next, spu);          /* Step 26. */
1976        setup_spu_status_part1(next, spu);      /* Step 27. */
1977        setup_spu_status_part2(next, spu);      /* Step 28. */
1978        restore_mfc_rag(next, spu);             /* Step 29. */
1979        /* Step 30. */
1980        setup_mfc_slbs(next, spu, spu_restore_code, sizeof(spu_restore_code));
1981        set_spu_npc(next, spu);                 /* Step 31. */
1982        set_signot1(next, spu);                 /* Step 32. */
1983        set_signot2(next, spu);                 /* Step 33. */
1984        setup_decr(next, spu);                  /* Step 34. */
1985        setup_ppu_mb(next, spu);                /* Step 35. */
1986        setup_ppuint_mb(next, spu);             /* Step 36. */
1987        send_restore_code(next, spu);           /* Step 37. */
1988        set_ppu_querymask(next, spu);           /* Step 38. */
1989        wait_tag_complete(next, spu);           /* Step 39. */
1990        wait_spu_stopped(next, spu);            /* Step 40. */
1991}
1992
1993static void restore_csa(struct spu_state *next, struct spu *spu)
1994{
1995        /*
1996         * Combine steps 41-76 of SPU context restore sequence, which
1997         * restore regions of the privileged & problem state areas.
1998         */
1999
2000        restore_spu_privcntl(next, spu);        /* Step 41. */
2001        restore_status_part1(next, spu);        /* Step 42. */
2002        restore_status_part2(next, spu);        /* Step 43. */
2003        restore_ls_16kb(next, spu);             /* Step 44. */
2004        wait_tag_complete(next, spu);           /* Step 45. */
2005        suspend_mfc(next, spu);                 /* Step 46. */
2006        wait_suspend_mfc_complete(next, spu);   /* Step 47. */
2007        issue_mfc_tlbie(next, spu);             /* Step 48. */
2008        clear_interrupts(next, spu);            /* Step 49. */
2009        restore_mfc_queues(next, spu);          /* Step 50. */
2010        restore_ppu_querymask(next, spu);       /* Step 51. */
2011        restore_ppu_querytype(next, spu);       /* Step 52. */
2012        restore_mfc_csr_tsq(next, spu);         /* Step 53. */
2013        restore_mfc_csr_cmd(next, spu);         /* Step 54. */
2014        restore_mfc_csr_ato(next, spu);         /* Step 55. */
2015        restore_mfc_tclass_id(next, spu);       /* Step 56. */
2016        set_llr_event(next, spu);               /* Step 57. */
2017        restore_decr_wrapped(next, spu);        /* Step 58. */
2018        restore_ch_part1(next, spu);            /* Step 59. */
2019        restore_ch_part2(next, spu);            /* Step 60. */
2020        restore_spu_lslr(next, spu);            /* Step 61. */
2021        restore_spu_cfg(next, spu);             /* Step 62. */
2022        restore_pm_trace(next, spu);            /* Step 63. */
2023        restore_spu_npc(next, spu);             /* Step 64. */
2024        restore_spu_mb(next, spu);              /* Step 65. */
2025        check_ppu_mb_stat(next, spu);           /* Step 66. */
2026        check_ppuint_mb_stat(next, spu);        /* Step 67. */
2027        spu_invalidate_slbs(spu);               /* Modified Step 68. */
2028        restore_mfc_sr1(next, spu);             /* Step 69. */
2029        set_int_route(next, spu);               /* NEW      */
2030        restore_other_spu_access(next, spu);    /* Step 70. */
2031        restore_spu_runcntl(next, spu);         /* Step 71. */
2032        restore_mfc_cntl(next, spu);            /* Step 72. */
2033        enable_user_access(next, spu);          /* Step 73. */
2034        reset_switch_active(next, spu);         /* Step 74. */
2035        reenable_interrupts(next, spu);         /* Step 75. */
2036}
2037
2038static int __do_spu_save(struct spu_state *prev, struct spu *spu)
2039{
2040        int rc;
2041
2042        /*
2043         * SPU context save can be broken into three phases:
2044         *
2045         *     (a) quiesce [steps 2-18].
2046         *     (b) save of CSA, performed by PPE [steps 19-45].
2047         *     (c) save of LSCSA, mostly performed by SPU [steps 46-57].
2048         *
2049         * Returns      0 on success.
2050         *              2,6 if failed to quiesce SPU.
2051         *              53 if SPU-side of save failed.
2052         */
2053
2054        rc = quiesce_spu(prev, spu);            /* Steps 2-18. */
2055        switch (rc) {
2056        default:
2057        case 2:
2058        case 6:
2059                harvest(prev, spu);
2060                return rc;
2062        case 0:
2063                break;
2064        }
2065        save_csa(prev, spu);                    /* Steps 19-45. */
2066        save_lscsa(prev, spu);                  /* Steps 46-57. */
2067        return check_save_status(prev, spu);    /* Step 54.     */
2068}
2069
2070static int __do_spu_restore(struct spu_state *next, struct spu *spu)
2071{
2072        int rc;
2073
2074        /*
2075         * SPU context restore can be broken into three phases:
2076         *
2077         *    (a) harvest (or reset) SPU [steps 2-25].
2078         *    (b) restore LSCSA [steps 26-40], mostly performed by SPU.
2079         *    (c) restore CSA [steps 41-76], performed by PPE.
2080         *
2081         * The 'harvest' step is not performed here, but rather
2082         * as needed below.
2083         */
2084
2085        restore_lscsa(next, spu);               /* Steps 26-40. */
2086        rc = check_restore_status(next, spu);   /* Step 40.     */
2087        switch (rc) {
2088        default:
2089                /* Failed. Return now. */
2090                return rc;
2092        case 0:
2093                /* Fall through to next step. */
2094                break;
2095        }
2096        restore_csa(next, spu);
2097
2098        return 0;
2099}
2100
2101/**
2102 * spu_save - SPU context save, with locking.
2103 * @prev: pointer to SPU context save area, to be saved.
2104 * @spu: pointer to SPU iomem structure.
2105 *
2106 * Acquire locks, perform the save operation then return.
2107 */
2108int spu_save(struct spu_state *prev, struct spu *spu)
2109{
2110        int rc;
2111
2112        acquire_spu_lock(spu);          /* Step 1.     */
2113        rc = __do_spu_save(prev, spu);  /* Steps 2-57. */
2114        release_spu_lock(spu);
2115        if (rc != 0 && rc != 2 && rc != 6) {
2116                panic("%s failed on SPU[%d], rc=%d.\n",
2117                      __func__, spu->number, rc);
2118        }
2119        return 0;
2120}
2121EXPORT_SYMBOL_GPL(spu_save);
2122
2123/**
2124 * spu_restore - SPU context restore, with harvest and locking.
2125 * @new: pointer to SPU context save area, to be restored.
2126 * @spu: pointer to SPU iomem structure.
2127 *
2128 * Perform harvest + restore, as we may not be coming
2129 * from a previous successful save operation, and the
2130 * hardware state is unknown.
2131 */
2132int spu_restore(struct spu_state *new, struct spu *spu)
2133{
2134        int rc;
2135
2136        acquire_spu_lock(spu);
2137        harvest(NULL, spu);
2138        spu->slb_replace = 0;
2139        rc = __do_spu_restore(new, spu);
2140        release_spu_lock(spu);
2141        if (rc) {
2142                panic("%s failed on SPU[%d] rc=%d.\n",
2143                       __func__, spu->number, rc);
2144        }
2145        return rc;
2146}
2147EXPORT_SYMBOL_GPL(spu_restore);
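
    /*
     * Typical caller sequence (a sketch only; the spufs scheduler
     * wraps these calls in its own context and run-queue locking):
     *
     *     spu_init_csa(&ctx->csa);          context creation
     *     spu_save(&ctx->csa, spu);         preempt:  SPU -> CSA
     *     spu_restore(&ctx->csa, spu);      dispatch: CSA -> SPU
     *     spu_fini_csa(&ctx->csa);          context destruction
     */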
2148
2149static void init_prob(struct spu_state *csa)
2150{
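            /* Fresh-context channel counts: write channels 9
             * (MFC_WrMSSyncReq), 23 (MFC_WrTagUpdate), 28 (SPU_WrOutMbox)
             * and 30 (SPU_WrOutIntrMbox) start available with a count
             * of 1; channel 21 (MFC_Cmd) starts with 16 free slots.
             */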
2151        csa->spu_chnlcnt_RW[9] = 1;
2152        csa->spu_chnlcnt_RW[21] = 16;
2153        csa->spu_chnlcnt_RW[23] = 1;
2154        csa->spu_chnlcnt_RW[28] = 1;
2155        csa->spu_chnlcnt_RW[30] = 1;
2156        csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP;
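            /* Outbound mailboxes empty; inbound SPU_In_Mbox shows all
             * four slots free.
             */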
2157        csa->prob.mb_stat_R = 0x000400;
2158}
2159
2160static void init_priv1(struct spu_state *csa)
2161{
2162        /* Enable decode, relocate, problem state, tlbie response, master runcntl. */
2163        csa->priv1.mfc_sr1_RW = MFC_STATE1_LOCAL_STORAGE_DECODE_MASK |
2164            MFC_STATE1_MASTER_RUN_CONTROL_MASK |
2165            MFC_STATE1_PROBLEM_STATE_MASK |
2166            MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK;
2167
2168        /* Enable OS-specific set of interrupts. */
2169        csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR |
2170            CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR |
2171            CLASS0_ENABLE_SPU_ERROR_INTR;
2172        csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
2173            CLASS1_ENABLE_STORAGE_FAULT_INTR;
2174        csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR |
2175            CLASS2_ENABLE_SPU_HALT_INTR |
2176            CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR;
2177}
2178
2179static void init_priv2(struct spu_state *csa)
2180{
2181        csa->priv2.spu_lslr_RW = LS_ADDR_MASK;
2182        csa->priv2.mfc_control_RW = MFC_CNTL_RESUME_DMA_QUEUE |
2183            MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION |
2184            MFC_CNTL_DMA_QUEUES_EMPTY_MASK;
2185}
2186
2187/**
2188 * spu_init_csa - allocate and initialize an SPU context save area.
2189 *
2190 * Allocate and initialize the contents of an SPU context save area.
2191 * This includes enabling address translation, interrupt masks, etc.,
2192 * as appropriate for the given OS environment.
2193 *
2194 * Note that storage for the 'lscsa' is allocated separately,
2195 * as it is by far the largest of the context save regions,
2196 * and may need to be pinned or otherwise specially aligned.
2197 */
2198int spu_init_csa(struct spu_state *csa)
2199{
2200        int rc;
2201
2202        if (!csa)
2203                return -EINVAL;
2204        memset(csa, 0, sizeof(struct spu_state));
2205
2206        rc = spu_alloc_lscsa(csa);
2207        if (rc)
2208                return rc;
2209
2210        spin_lock_init(&csa->register_lock);
2211
2212        init_prob(csa);
2213        init_priv1(csa);
2214        init_priv2(csa);
2215
2216        return 0;
2217}
2218
2219void spu_fini_csa(struct spu_state *csa)
2220{
2221        spu_free_lscsa(csa);
2222}
2223