linux/arch/powerpc/platforms/cell/spufs/backing_ops.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* backing_ops.c - query/set operations on saved SPU context.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * These register operations allow SPUFS to operate on saved
 * SPU contexts rather than hardware.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/poll.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_info.h>
#include <asm/mmu_context.h>
#include "spufs.h"

/*
 * Reads/writes to various problem and priv2 registers require
 * state changes, i.e. they generate SPU events, modify channel
 * counts, etc.
 */

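/*
 * Raise 'event' in the saved event status channel data (channel 0).
 * If the channel was empty, the event was not already pending, and
 * the event is enabled in the saved event mask (channel 1), make the
 * event status channel readable by setting its count to 1.
 */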
static void gen_spu_event(struct spu_context *ctx, u32 event)
{
        u64 ch0_cnt;
        u64 ch0_data;
        u64 ch1_data;

        ch0_cnt = ctx->csa.spu_chnlcnt_RW[0];
        ch0_data = ctx->csa.spu_chnldata_RW[0];
        ch1_data = ctx->csa.spu_chnldata_RW[1];
        ctx->csa.spu_chnldata_RW[0] |= event;
        if ((ch0_cnt == 0) && !(ch0_data & event) && (ch1_data & event)) {
                ctx->csa.spu_chnlcnt_RW[0] = 1;
        }
}

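/*
 * Layout of the saved SPU_Mbox_Stat word, as used throughout this
 * file: bits 0-7 count words in the SPU outbound mailbox, bits 8-15
 * count free slots in the SPU inbound mailbox, and bits 16-23 count
 * words in the SPU outbound interrupt mailbox.
 */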
static int spu_backing_mbox_read(struct spu_context *ctx, u32 *data)
{
        u32 mbox_stat;
        int ret = 0;

        spin_lock(&ctx->csa.register_lock);
        mbox_stat = ctx->csa.prob.mb_stat_R;
        if (mbox_stat & 0x0000ff) {
                /* Read the first available word.
                 * Implementation note: the depth
                 * of pu_mb_R is currently 1.
                 */
                *data = ctx->csa.prob.pu_mb_R;
                ctx->csa.prob.mb_stat_R &= ~(0x0000ff);
                ctx->csa.spu_chnlcnt_RW[28] = 1;
                gen_spu_event(ctx, MFC_PU_MAILBOX_AVAILABLE_EVENT);
                ret = 4;
        }
        spin_unlock(&ctx->csa.register_lock);
        return ret;
}

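/* Read the saved mailbox status word; a single load, done unlocked. */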
static u32 spu_backing_mbox_stat_read(struct spu_context *ctx)
{
        return ctx->csa.prob.mb_stat_R;
}

static __poll_t spu_backing_mbox_stat_poll(struct spu_context *ctx,
                                          __poll_t events)
{
        __poll_t ret;
        u32 stat;

        ret = 0;
        spin_lock_irq(&ctx->csa.register_lock);
        stat = ctx->csa.prob.mb_stat_R;

        /* if the requested event is there, return the poll
           mask, otherwise enable the interrupt to get notified,
           but first mark any pending interrupts as done so
           we don't get woken up unnecessarily */

        if (events & (EPOLLIN | EPOLLRDNORM)) {
                if (stat & 0xff0000)
                        ret |= EPOLLIN | EPOLLRDNORM;
                else {
                        ctx->csa.priv1.int_stat_class2_RW &=
                                ~CLASS2_MAILBOX_INTR;
                        ctx->csa.priv1.int_mask_class2_RW |=
                                CLASS2_ENABLE_MAILBOX_INTR;
                }
        }
        if (events & (EPOLLOUT | EPOLLWRNORM)) {
                if (stat & 0x00ff00)
                        ret |= EPOLLOUT | EPOLLWRNORM;
                else {
                        ctx->csa.priv1.int_stat_class2_RW &=
                                ~CLASS2_MAILBOX_THRESHOLD_INTR;
                        ctx->csa.priv1.int_mask_class2_RW |=
                                CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
                }
        }
        spin_unlock_irq(&ctx->csa.register_lock);
        return ret;
}

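/*
 * Read one word from the saved interrupt mailbox. When it is empty,
 * re-enable the class 2 mailbox interrupt so the reader is woken up
 * once data arrives, and return 0.
 */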
static int spu_backing_ibox_read(struct spu_context *ctx, u32 *data)
{
        int ret;

        spin_lock(&ctx->csa.register_lock);
        if (ctx->csa.prob.mb_stat_R & 0xff0000) {
                /* Read the first available word.
                 * Implementation note: the depth
                 * of puint_mb_R is currently 1.
                 */
                *data = ctx->csa.priv2.puint_mb_R;
                ctx->csa.prob.mb_stat_R &= ~(0xff0000);
                ctx->csa.spu_chnlcnt_RW[30] = 1;
                gen_spu_event(ctx, MFC_PU_INT_MAILBOX_AVAILABLE_EVENT);
                ret = 4;
        } else {
                /* make sure we get woken up by the interrupt */
                ctx->csa.priv1.int_mask_class2_RW |= CLASS2_ENABLE_MAILBOX_INTR;
                ret = 0;
        }
        spin_unlock(&ctx->csa.register_lock);
        return ret;
}

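/*
 * Append one word to the saved SPU inbound mailbox if a slot is
 * free; the saved count of channel 29 doubles as the index of the
 * next free slot. Returns 4 on success and 0 when the queue is full.
 */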
static int spu_backing_wbox_write(struct spu_context *ctx, u32 data)
{
        int ret;

        spin_lock(&ctx->csa.register_lock);
        if ((ctx->csa.prob.mb_stat_R) & 0x00ff00) {
                int slot = ctx->csa.spu_chnlcnt_RW[29];
                int avail = (ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8;

                /* We have space to write wbox_data.
                 * Implementation note: the depth
                 * of spu_mb_W is currently 4.
                 */
                BUG_ON(avail != (4 - slot));
                ctx->csa.spu_mailbox_data[slot] = data;
                ctx->csa.spu_chnlcnt_RW[29] = ++slot;
                ctx->csa.prob.mb_stat_R &= ~(0x00ff00);
                ctx->csa.prob.mb_stat_R |= (((4 - slot) & 0xff) << 8);
                gen_spu_event(ctx, MFC_SPU_MAILBOX_WRITTEN_EVENT);
                ret = 4;
        } else {
                /* make sure we get woken up by the interrupt when space
                   becomes available */
                ctx->csa.priv1.int_mask_class2_RW |=
                        CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
                ret = 0;
        }
        spin_unlock(&ctx->csa.register_lock);
        return ret;
}

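/* Signal notification 1 is kept in the saved data of channel 3. */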
static u32 spu_backing_signal1_read(struct spu_context *ctx)
{
        return ctx->csa.spu_chnldata_RW[3];
}

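/*
 * Depending on bit 0 of the saved spu_cfg_RW, writes are either
 * OR'd into the pending value (logical-OR mode) or overwrite it;
 * in both cases the channel is marked readable.
 */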
static void spu_backing_signal1_write(struct spu_context *ctx, u32 data)
{
        spin_lock(&ctx->csa.register_lock);
        if (ctx->csa.priv2.spu_cfg_RW & 0x1)
                ctx->csa.spu_chnldata_RW[3] |= data;
        else
                ctx->csa.spu_chnldata_RW[3] = data;
        ctx->csa.spu_chnlcnt_RW[3] = 1;
        gen_spu_event(ctx, MFC_SIGNAL_1_EVENT);
        spin_unlock(&ctx->csa.register_lock);
}

static u32 spu_backing_signal2_read(struct spu_context *ctx)
{
        return ctx->csa.spu_chnldata_RW[4];
}

static void spu_backing_signal2_write(struct spu_context *ctx, u32 data)
{
        spin_lock(&ctx->csa.register_lock);
        if (ctx->csa.priv2.spu_cfg_RW & 0x2)
                ctx->csa.spu_chnldata_RW[4] |= data;
        else
                ctx->csa.spu_chnldata_RW[4] = data;
        ctx->csa.spu_chnlcnt_RW[4] = 1;
        gen_spu_event(ctx, MFC_SIGNAL_2_EVENT);
        spin_unlock(&ctx->csa.register_lock);
}

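/*
 * Bits 0 and 1 of the saved spu_cfg_RW select overwrite (0) or
 * logical-OR (1) behaviour for signal notification 1 and 2,
 * respectively.
 */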
static void spu_backing_signal1_type_set(struct spu_context *ctx, u64 val)
{
        u64 tmp;

        spin_lock(&ctx->csa.register_lock);
        tmp = ctx->csa.priv2.spu_cfg_RW;
        if (val)
                tmp |= 1;
        else
                tmp &= ~1;
        ctx->csa.priv2.spu_cfg_RW = tmp;
        spin_unlock(&ctx->csa.register_lock);
}

static u64 spu_backing_signal1_type_get(struct spu_context *ctx)
{
        return ((ctx->csa.priv2.spu_cfg_RW & 1) != 0);
}

static void spu_backing_signal2_type_set(struct spu_context *ctx, u64 val)
{
        u64 tmp;

        spin_lock(&ctx->csa.register_lock);
        tmp = ctx->csa.priv2.spu_cfg_RW;
        if (val)
                tmp |= 2;
        else
                tmp &= ~2;
        ctx->csa.priv2.spu_cfg_RW = tmp;
        spin_unlock(&ctx->csa.register_lock);
}

static u64 spu_backing_signal2_type_get(struct spu_context *ctx)
{
        return ((ctx->csa.priv2.spu_cfg_RW & 2) != 0);
}

static u32 spu_backing_npc_read(struct spu_context *ctx)
{
        return ctx->csa.prob.spu_npc_RW;
}

static void spu_backing_npc_write(struct spu_context *ctx, u32 val)
{
        ctx->csa.prob.spu_npc_RW = val;
}

static u32 spu_backing_status_read(struct spu_context *ctx)
{
        return ctx->csa.prob.spu_status_R;
}

static char *spu_backing_get_ls(struct spu_context *ctx)
{
        return ctx->csa.lscsa->ls;
}

static void spu_backing_privcntl_write(struct spu_context *ctx, u64 val)
{
        ctx->csa.priv2.spu_privcntl_RW = val;
}

static u32 spu_backing_runcntl_read(struct spu_context *ctx)
{
        return ctx->csa.prob.spu_runcntl_RW;
}

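/*
 * Mirror the effect of a run control write on hardware: making the
 * context runnable clears the stop-reason bits from the saved status
 * word and sets RUNNING; anything else clears RUNNING.
 */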
static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
{
        spin_lock(&ctx->csa.register_lock);
        ctx->csa.prob.spu_runcntl_RW = val;
        if (val & SPU_RUNCNTL_RUNNABLE) {
                ctx->csa.prob.spu_status_R &=
                        ~SPU_STATUS_STOPPED_BY_STOP &
                        ~SPU_STATUS_STOPPED_BY_HALT &
                        ~SPU_STATUS_SINGLE_STEP &
                        ~SPU_STATUS_INVALID_INSTR &
                        ~SPU_STATUS_INVALID_CH;
                ctx->csa.prob.spu_status_R |= SPU_STATUS_RUNNING;
        } else {
                ctx->csa.prob.spu_status_R &= ~SPU_STATUS_RUNNING;
        }
        spin_unlock(&ctx->csa.register_lock);
}

static void spu_backing_runcntl_stop(struct spu_context *ctx)
{
        spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP);
}

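/* Set or clear the master run control bit in the saved MFC_SR1. */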
static void spu_backing_master_start(struct spu_context *ctx)
{
        struct spu_state *csa = &ctx->csa;
        u64 sr1;

        spin_lock(&csa->register_lock);
        sr1 = csa->priv1.mfc_sr1_RW | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
        csa->priv1.mfc_sr1_RW = sr1;
        spin_unlock(&csa->register_lock);
}

static void spu_backing_master_stop(struct spu_context *ctx)
{
        struct spu_state *csa = &ctx->csa;
        u64 sr1;

        spin_lock(&csa->register_lock);
        sr1 = csa->priv1.mfc_sr1_RW & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
        csa->priv1.mfc_sr1_RW = sr1;
        spin_unlock(&csa->register_lock);
}

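/*
 * Start a tag group status query on the saved MFC state. Returns
 * -EAGAIN while a previous query is still pending.
 */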
static int spu_backing_set_mfc_query(struct spu_context *ctx, u32 mask,
                                        u32 mode)
{
        struct spu_problem_collapsed *prob = &ctx->csa.prob;
        int ret;

        spin_lock(&ctx->csa.register_lock);
        ret = -EAGAIN;
        if (prob->dma_querytype_RW)
                goto out;
        ret = 0;
        /* FIXME: what are the side-effects of this? */
        prob->dma_querymask_RW = mask;
        prob->dma_querytype_RW = mode;
        /* In the current implementation, the SPU context is always
         * acquired in runnable state when new bits are added to the
         * mask (tagwait), so it's sufficient just to mask
         * dma_tagstatus_R with the 'mask' parameter here.
         */
        ctx->csa.prob.dma_tagstatus_R &= mask;
out:
        spin_unlock(&ctx->csa.register_lock);

        return ret;
}

static u32 spu_backing_read_mfc_tagstatus(struct spu_context *ctx)
{
        return ctx->csa.prob.dma_tagstatus_R;
}

static u32 spu_backing_get_mfc_free_elements(struct spu_context *ctx)
{
        return ctx->csa.prob.dma_qstatus_R;
}

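/*
 * Queueing a DMA command against a saved context is not implemented
 * (see the FIXME below), so callers always get -EAGAIN.
 */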
static int spu_backing_send_mfc_command(struct spu_context *ctx,
                                        struct mfc_dma_command *cmd)
{
        int ret;

        spin_lock(&ctx->csa.register_lock);
        ret = -EAGAIN;
        /* FIXME: set up priv2->puq */
        spin_unlock(&ctx->csa.register_lock);

        return ret;
}

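/* Record a DMA restart request in the saved MFC control register. */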
static void spu_backing_restart_dma(struct spu_context *ctx)
{
        ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
}

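/*
 * Operations vector for contexts saved in memory; the hardware
 * counterpart for loaded contexts is spu_hw_ops.
 */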
struct spu_context_ops spu_backing_ops = {
        .mbox_read = spu_backing_mbox_read,
        .mbox_stat_read = spu_backing_mbox_stat_read,
        .mbox_stat_poll = spu_backing_mbox_stat_poll,
        .ibox_read = spu_backing_ibox_read,
        .wbox_write = spu_backing_wbox_write,
        .signal1_read = spu_backing_signal1_read,
        .signal1_write = spu_backing_signal1_write,
        .signal2_read = spu_backing_signal2_read,
        .signal2_write = spu_backing_signal2_write,
        .signal1_type_set = spu_backing_signal1_type_set,
        .signal1_type_get = spu_backing_signal1_type_get,
        .signal2_type_set = spu_backing_signal2_type_set,
        .signal2_type_get = spu_backing_signal2_type_get,
        .npc_read = spu_backing_npc_read,
        .npc_write = spu_backing_npc_write,
        .status_read = spu_backing_status_read,
        .get_ls = spu_backing_get_ls,
        .privcntl_write = spu_backing_privcntl_write,
        .runcntl_read = spu_backing_runcntl_read,
        .runcntl_write = spu_backing_runcntl_write,
        .runcntl_stop = spu_backing_runcntl_stop,
        .master_start = spu_backing_master_start,
        .master_stop = spu_backing_master_stop,
        .set_mfc_query = spu_backing_set_mfc_query,
        .read_mfc_tagstatus = spu_backing_read_mfc_tagstatus,
        .get_mfc_free_elements = spu_backing_get_mfc_free_elements,
        .send_mfc_command = spu_backing_send_mfc_command,
        .restart_dma = spu_backing_restart_dma,
};