/* linux/arch/powerpc/platforms/cell/spufs/file.c */
   1/*
   2 * SPU file system -- file contents
   3 *
   4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
   5 *
   6 * Author: Arnd Bergmann <arndb@de.ibm.com>
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License as published by
  10 * the Free Software Foundation; either version 2, or (at your option)
  11 * any later version.
  12 *
  13 * This program is distributed in the hope that it will be useful,
  14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  16 * GNU General Public License for more details.
  17 *
  18 * You should have received a copy of the GNU General Public License
  19 * along with this program; if not, write to the Free Software
  20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  21 */
  22
  23#undef DEBUG
  24
  25#include <linux/fs.h>
  26#include <linux/ioctl.h>
  27#include <linux/module.h>
  28#include <linux/pagemap.h>
  29#include <linux/poll.h>
  30#include <linux/ptrace.h>
  31#include <linux/seq_file.h>
  32
  33#include <asm/io.h>
  34#include <asm/time.h>
  35#include <asm/spu.h>
  36#include <asm/spu_info.h>
  37#include <asm/uaccess.h>
  38
  39#include "spufs.h"
  40#include "sputrace.h"
  41
/* True when the kernel base page size is 4K; gates 4K-only mmap handlers. */
#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
  43
/* Simple attribute files */
struct spufs_attr {
	int (*get)(void *, u64 *);	/* read hook; NULL makes the file write-only */
	int (*set)(void *, u64);	/* write hook; NULL makes the file read-only */
	char get_buf[24];	/* enough to store a u64 and "\n\0" */
	char set_buf[24];	/* staging buffer for user-supplied values */
	void *data;		/* opaque cookie passed to get/set (from inode->i_private) */
	const char *fmt;	/* format for read operation */
	struct mutex mutex;	/* protects access to these buffers */
};
  54
  55static int spufs_attr_open(struct inode *inode, struct file *file,
  56                int (*get)(void *, u64 *), int (*set)(void *, u64),
  57                const char *fmt)
  58{
  59        struct spufs_attr *attr;
  60
  61        attr = kmalloc(sizeof(*attr), GFP_KERNEL);
  62        if (!attr)
  63                return -ENOMEM;
  64
  65        attr->get = get;
  66        attr->set = set;
  67        attr->data = inode->i_private;
  68        attr->fmt = fmt;
  69        mutex_init(&attr->mutex);
  70        file->private_data = attr;
  71
  72        return nonseekable_open(inode, file);
  73}
  74
  75static int spufs_attr_release(struct inode *inode, struct file *file)
  76{
  77       kfree(file->private_data);
  78        return 0;
  79}
  80
  81static ssize_t spufs_attr_read(struct file *file, char __user *buf,
  82                size_t len, loff_t *ppos)
  83{
  84        struct spufs_attr *attr;
  85        size_t size;
  86        ssize_t ret;
  87
  88        attr = file->private_data;
  89        if (!attr->get)
  90                return -EACCES;
  91
  92        ret = mutex_lock_interruptible(&attr->mutex);
  93        if (ret)
  94                return ret;
  95
  96        if (*ppos) {            /* continued read */
  97                size = strlen(attr->get_buf);
  98        } else {                /* first read */
  99                u64 val;
 100                ret = attr->get(attr->data, &val);
 101                if (ret)
 102                        goto out;
 103
 104                size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
 105                                 attr->fmt, (unsigned long long)val);
 106        }
 107
 108        ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
 109out:
 110        mutex_unlock(&attr->mutex);
 111        return ret;
 112}
 113
 114static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
 115                size_t len, loff_t *ppos)
 116{
 117        struct spufs_attr *attr;
 118        u64 val;
 119        size_t size;
 120        ssize_t ret;
 121
 122        attr = file->private_data;
 123        if (!attr->set)
 124                return -EACCES;
 125
 126        ret = mutex_lock_interruptible(&attr->mutex);
 127        if (ret)
 128                return ret;
 129
 130        ret = -EFAULT;
 131        size = min(sizeof(attr->set_buf) - 1, len);
 132        if (copy_from_user(attr->set_buf, buf, size))
 133                goto out;
 134
 135        ret = len; /* claim we got the whole input */
 136        attr->set_buf[size] = '\0';
 137        val = simple_strtol(attr->set_buf, NULL, 0);
 138        attr->set(attr->data, val);
 139out:
 140        mutex_unlock(&attr->mutex);
 141        return ret;
 142}
 143
/*
 * DEFINE_SPUFS_SIMPLE_ATTRIBUTE - declare file_operations for a numeric
 * attribute file, analogous to DEFINE_SIMPLE_ATTRIBUTE but backed by the
 * interruptible spufs_attr_* helpers above.
 * __simple_attr_check_format() only type-checks __fmt against a u64 at
 * compile time; it generates no code.
 */
#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)      \
static int __fops ## _open(struct inode *inode, struct file *file)      \
{                                                                       \
        __simple_attr_check_format(__fmt, 0ull);                        \
        return spufs_attr_open(inode, file, __get, __set, __fmt);       \
}                                                                       \
static const struct file_operations __fops = {                          \
        .owner   = THIS_MODULE,                                         \
        .open    = __fops ## _open,                                     \
        .release = spufs_attr_release,                                  \
        .read    = spufs_attr_read,                                     \
        .write   = spufs_attr_write,                                    \
};
 157
 158
 159static int
 160spufs_mem_open(struct inode *inode, struct file *file)
 161{
 162        struct spufs_inode_info *i = SPUFS_I(inode);
 163        struct spu_context *ctx = i->i_ctx;
 164
 165        mutex_lock(&ctx->mapping_lock);
 166        file->private_data = ctx;
 167        if (!i->i_openers++)
 168                ctx->local_store = inode->i_mapping;
 169        mutex_unlock(&ctx->mapping_lock);
 170        return 0;
 171}
 172
 173static int
 174spufs_mem_release(struct inode *inode, struct file *file)
 175{
 176        struct spufs_inode_info *i = SPUFS_I(inode);
 177        struct spu_context *ctx = i->i_ctx;
 178
 179        mutex_lock(&ctx->mapping_lock);
 180        if (!--i->i_openers)
 181                ctx->local_store = NULL;
 182        mutex_unlock(&ctx->mapping_lock);
 183        return 0;
 184}
 185
 186static ssize_t
 187__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
 188                        size_t size, loff_t *pos)
 189{
 190        char *local_store = ctx->ops->get_ls(ctx);
 191        return simple_read_from_buffer(buffer, size, pos, local_store,
 192                                        LS_SIZE);
 193}
 194
 195static ssize_t
 196spufs_mem_read(struct file *file, char __user *buffer,
 197                                size_t size, loff_t *pos)
 198{
 199        struct spu_context *ctx = file->private_data;
 200        ssize_t ret;
 201
 202        ret = spu_acquire(ctx);
 203        if (ret)
 204                return ret;
 205        ret = __spufs_mem_read(ctx, buffer, size, pos);
 206        spu_release(ctx);
 207
 208        return ret;
 209}
 210
 211static ssize_t
 212spufs_mem_write(struct file *file, const char __user *buffer,
 213                                        size_t size, loff_t *ppos)
 214{
 215        struct spu_context *ctx = file->private_data;
 216        char *local_store;
 217        loff_t pos = *ppos;
 218        int ret;
 219
 220        if (pos < 0)
 221                return -EINVAL;
 222        if (pos > LS_SIZE)
 223                return -EFBIG;
 224        if (size > LS_SIZE - pos)
 225                size = LS_SIZE - pos;
 226
 227        ret = spu_acquire(ctx);
 228        if (ret)
 229                return ret;
 230
 231        local_store = ctx->ops->get_ls(ctx);
 232        ret = copy_from_user(local_store + pos, buffer, size);
 233        spu_release(ctx);
 234
 235        if (ret)
 236                return -EFAULT;
 237        *ppos = pos + size;
 238        return size;
 239}
 240
/*
 * Fault handler for the local-store mapping.  Inserts a PFN for the
 * faulting page: backed by the vmalloc'ed save area while the context
 * is saved, or by the physical local store while it runs on an SPU.
 */
static int
spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long address = (unsigned long)vmf->virtual_address;
	unsigned long pfn, offset;

#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_state *csa = &ctx->csa;
	int psize;

	/* Check what page size we are using */
	psize = get_slice_psize(vma->vm_mm, address);

	/* Some sanity checking */
	BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));

	/* Wow, 64K, cool, we need to align the address though */
	if (csa->use_big_pages) {
		BUG_ON(vma->vm_start & 0xffff);
		address &= ~0xfffful;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= LS_SIZE)
		return VM_FAULT_SIGBUS;

	pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
			address, offset);

	/* An interrupted acquire just lets the fault be retried. */
	if (spu_acquire(ctx))
		return VM_FAULT_NOPAGE;

	if (ctx->state == SPU_STATE_SAVED) {
		/* saved context: cacheable mapping of the backing copy */
		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		/* live context: write-combined mapping of the real LS */
		vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	vm_insert_pfn(vma, address, pfn);

	spu_release(ctx);

	return VM_FAULT_NOPAGE;
}
 288
 289static int spufs_mem_mmap_access(struct vm_area_struct *vma,
 290                                unsigned long address,
 291                                void *buf, int len, int write)
 292{
 293        struct spu_context *ctx = vma->vm_file->private_data;
 294        unsigned long offset = address - vma->vm_start;
 295        char *local_store;
 296
 297        if (write && !(vma->vm_flags & VM_WRITE))
 298                return -EACCES;
 299        if (spu_acquire(ctx))
 300                return -EINTR;
 301        if ((offset + len) > vma->vm_end)
 302                len = vma->vm_end - offset;
 303        local_store = ctx->ops->get_ls(ctx);
 304        if (write)
 305                memcpy_toio(local_store + offset, buf, len);
 306        else
 307                memcpy_fromio(buf, local_store + offset, len);
 308        spu_release(ctx);
 309        return len;
 310}
 311
/* vm_ops for local-store mappings: demand faulting plus ptrace access. */
static const struct vm_operations_struct spufs_mem_mmap_vmops = {
	.fault = spufs_mem_mmap_fault,
	.access = spufs_mem_mmap_access,
};
 316
/*
 * mmap the SPU local store.  Only shared mappings are allowed; pages are
 * provided lazily by spufs_mem_mmap_fault().
 */
static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_context	*ctx = file->private_data;
	struct spu_state	*csa = &ctx->csa;

	/* Sanity check VMA alignment */
	if (csa->use_big_pages) {
		pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
			 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
			 vma->vm_pgoff);
		/* with 64K pages both the VMA and the file offset
		 * must be 64K aligned */
		if (vma->vm_start & 0xffff)
			return -EINVAL;
		if (vma->vm_pgoff & 0xf)
			return -EINVAL;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	/* PFN-mapped I/O memory; default to write-combined access */
	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}
 344
#ifdef CONFIG_SPU_FS_64K_LS
/*
 * Pick a mapping address for the local store.  When the context uses 64K
 * pages for its LS we must place the mapping inside a 64K page slice;
 * otherwise defer to the mm's default placement.
 */
static unsigned long spufs_get_unmapped_area(struct file *file,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	struct spu_context	*ctx = file->private_data;
	struct spu_state	*csa = &ctx->csa;

	/* If not using big pages, fallback to normal MM g_u_a */
	if (!csa->use_big_pages)
		return current->mm->get_unmapped_area(file, addr, len,
						      pgoff, flags);

	/* Else, try to obtain a 64K pages slice */
	return slice_get_unmapped_area(addr, len, flags,
				       MMU_PAGE_64K, 1, 0);
}
#endif /* CONFIG_SPU_FS_64K_LS */
 363
/* File operations for the "mem" (local store) file. */
static const struct file_operations spufs_mem_fops = {
	.open			= spufs_mem_open,
	.release		= spufs_mem_release,
	.read			= spufs_mem_read,
	.write			= spufs_mem_write,
	.llseek			= generic_file_llseek,
	.mmap			= spufs_mem_mmap,
#ifdef CONFIG_SPU_FS_64K_LS
	.get_unmapped_area	= spufs_get_unmapped_area,
#endif
};
 375
/*
 * Common fault handler for problem-state mappings.  @ps_offs/@ps_size
 * describe the register window inside the problem-state area.  Pages can
 * only be handed out while the context is loaded on a physical SPU; if it
 * is saved we sleep (dropping mmap_sem) until it becomes runnable and let
 * the fault be retried.
 */
static int spufs_ps_fault(struct vm_area_struct *vma,
				    struct vm_fault *vmf,
				    unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
	int ret = 0;

	spu_context_nospu_trace(spufs_ps_fault__enter, ctx);

	if (offset >= ps_size)
		return VM_FAULT_SIGBUS;

	if (fatal_signal_pending(current))
		return VM_FAULT_SIGBUS;

	/*
	 * Because we release the mmap_sem, the context may be destroyed while
	 * we're in spu_wait. Grab an extra reference so it isn't destroyed
	 * in the meantime.
	 */
	get_spu_context(ctx);

	/*
	 * We have to wait for context to be loaded before we have
	 * pages to hand out to the user, but we don't want to wait
	 * with the mmap_sem held.
	 * It is possible to drop the mmap_sem here, but then we need
	 * to return VM_FAULT_NOPAGE because the mappings may have
	 * changed.
	 */
	if (spu_acquire(ctx))
		goto refault;

	if (ctx->state == SPU_STATE_SAVED) {
		/* drop mmap_sem while sleeping for the context to load */
		up_read(&current->mm->mmap_sem);
		spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
		ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
		spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
		down_read(&current->mm->mmap_sem);
	} else {
		/* context is loaded: map the physical problem-state page */
		area = ctx->spu->problem_phys + ps_offs;
		vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
					(area + offset) >> PAGE_SHIFT);
		spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
	}

	/* NOTE(review): on spufs_wait() failure the context lock is
	 * presumably already dropped, hence the release is skipped —
	 * confirm against spufs_wait() in spufs.h. */
	if (!ret)
		spu_release(ctx);

refault:
	put_spu_context(ctx);
	return VM_FAULT_NOPAGE;
}
 431
#if SPUFS_MMAP_4K
/* Fault handler for the cntl mapping: window at 0x4000 in problem state. */
static int spufs_cntl_mmap_fault(struct vm_area_struct *vma,
					   struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
}

static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.fault = spufs_cntl_mmap_fault,
};
 442
 443/*
 444 * mmap support for problem state control area [0x4000 - 0x4fff].
 445 */
 446static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
 447{
 448        if (!(vma->vm_flags & VM_SHARED))
 449                return -EINVAL;
 450
 451        vma->vm_flags |= VM_IO | VM_PFNMAP;
 452        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 453
 454        vma->vm_ops = &spufs_cntl_mmap_vmops;
 455        return 0;
 456}
 457#else /* SPUFS_MMAP_4K */
 458#define spufs_cntl_mmap NULL
 459#endif /* !SPUFS_MMAP_4K */
 460
 461static int spufs_cntl_get(void *data, u64 *val)
 462{
 463        struct spu_context *ctx = data;
 464        int ret;
 465
 466        ret = spu_acquire(ctx);
 467        if (ret)
 468                return ret;
 469        *val = ctx->ops->status_read(ctx);
 470        spu_release(ctx);
 471
 472        return 0;
 473}
 474
 475static int spufs_cntl_set(void *data, u64 val)
 476{
 477        struct spu_context *ctx = data;
 478        int ret;
 479
 480        ret = spu_acquire(ctx);
 481        if (ret)
 482                return ret;
 483        ctx->ops->runcntl_write(ctx, val);
 484        spu_release(ctx);
 485
 486        return 0;
 487}
 488
 489static int spufs_cntl_open(struct inode *inode, struct file *file)
 490{
 491        struct spufs_inode_info *i = SPUFS_I(inode);
 492        struct spu_context *ctx = i->i_ctx;
 493
 494        mutex_lock(&ctx->mapping_lock);
 495        file->private_data = ctx;
 496        if (!i->i_openers++)
 497                ctx->cntl = inode->i_mapping;
 498        mutex_unlock(&ctx->mapping_lock);
 499        return simple_attr_open(inode, file, spufs_cntl_get,
 500                                        spufs_cntl_set, "0x%08lx");
 501}
 502
 503static int
 504spufs_cntl_release(struct inode *inode, struct file *file)
 505{
 506        struct spufs_inode_info *i = SPUFS_I(inode);
 507        struct spu_context *ctx = i->i_ctx;
 508
 509        simple_attr_release(inode, file);
 510
 511        mutex_lock(&ctx->mapping_lock);
 512        if (!--i->i_openers)
 513                ctx->cntl = NULL;
 514        mutex_unlock(&ctx->mapping_lock);
 515        return 0;
 516}
 517
/* File operations for the "cntl" run-control attribute file.
 * .mmap is NULL when SPUFS_MMAP_4K is 0 (see the #else above). */
static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = spufs_cntl_release,
	.read = simple_attr_read,
	.write = simple_attr_write,
	.mmap = spufs_cntl_mmap,
};
 525
 526static int
 527spufs_regs_open(struct inode *inode, struct file *file)
 528{
 529        struct spufs_inode_info *i = SPUFS_I(inode);
 530        file->private_data = i->i_ctx;
 531        return 0;
 532}
 533
 534static ssize_t
 535__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
 536                        size_t size, loff_t *pos)
 537{
 538        struct spu_lscsa *lscsa = ctx->csa.lscsa;
 539        return simple_read_from_buffer(buffer, size, pos,
 540                                      lscsa->gprs, sizeof lscsa->gprs);
 541}
 542
 543static ssize_t
 544spufs_regs_read(struct file *file, char __user *buffer,
 545                size_t size, loff_t *pos)
 546{
 547        int ret;
 548        struct spu_context *ctx = file->private_data;
 549
 550        /* pre-check for file position: if we'd return EOF, there's no point
 551         * causing a deschedule */
 552        if (*pos >= sizeof(ctx->csa.lscsa->gprs))
 553                return 0;
 554
 555        ret = spu_acquire_saved(ctx);
 556        if (ret)
 557                return ret;
 558        ret = __spufs_regs_read(ctx, buffer, size, pos);
 559        spu_release_saved(ctx);
 560        return ret;
 561}
 562
 563static ssize_t
 564spufs_regs_write(struct file *file, const char __user *buffer,
 565                 size_t size, loff_t *pos)
 566{
 567        struct spu_context *ctx = file->private_data;
 568        struct spu_lscsa *lscsa = ctx->csa.lscsa;
 569        int ret;
 570
 571        if (*pos >= sizeof(lscsa->gprs))
 572                return -EFBIG;
 573
 574        size = min_t(ssize_t, sizeof(lscsa->gprs) - *pos, size);
 575        *pos += size;
 576
 577        ret = spu_acquire_saved(ctx);
 578        if (ret)
 579                return ret;
 580
 581        ret = copy_from_user((char *)lscsa->gprs + *pos - size,
 582                             buffer, size) ? -EFAULT : size;
 583
 584        spu_release_saved(ctx);
 585        return ret;
 586}
 587
/* File operations for the "regs" (saved GPR image) file. */
static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read	 = spufs_regs_read,
	.write	 = spufs_regs_write,
	.llseek  = generic_file_llseek,
};
 594
 595static ssize_t
 596__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
 597                        size_t size, loff_t * pos)
 598{
 599        struct spu_lscsa *lscsa = ctx->csa.lscsa;
 600        return simple_read_from_buffer(buffer, size, pos,
 601                                      &lscsa->fpcr, sizeof(lscsa->fpcr));
 602}
 603
 604static ssize_t
 605spufs_fpcr_read(struct file *file, char __user * buffer,
 606                size_t size, loff_t * pos)
 607{
 608        int ret;
 609        struct spu_context *ctx = file->private_data;
 610
 611        ret = spu_acquire_saved(ctx);
 612        if (ret)
 613                return ret;
 614        ret = __spufs_fpcr_read(ctx, buffer, size, pos);
 615        spu_release_saved(ctx);
 616        return ret;
 617}
 618
 619static ssize_t
 620spufs_fpcr_write(struct file *file, const char __user * buffer,
 621                 size_t size, loff_t * pos)
 622{
 623        struct spu_context *ctx = file->private_data;
 624        struct spu_lscsa *lscsa = ctx->csa.lscsa;
 625        int ret;
 626
 627        if (*pos >= sizeof(lscsa->fpcr))
 628                return -EFBIG;
 629
 630        size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
 631
 632        ret = spu_acquire_saved(ctx);
 633        if (ret)
 634                return ret;
 635
 636        *pos += size;
 637        ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
 638                             buffer, size) ? -EFAULT : size;
 639
 640        spu_release_saved(ctx);
 641        return ret;
 642}
 643
/* File operations for the "fpcr" file; reuses the generic regs open. */
static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};
 650
 651/* generic open function for all pipe-like files */
 652static int spufs_pipe_open(struct inode *inode, struct file *file)
 653{
 654        struct spufs_inode_info *i = SPUFS_I(inode);
 655        file->private_data = i->i_ctx;
 656
 657        return nonseekable_open(inode, file);
 658}
 659
 660/*
 661 * Read as many bytes from the mailbox as possible, until
 662 * one of the conditions becomes true:
 663 *
 664 * - no more data available in the mailbox
 665 * - end of the user provided buffer
 666 * - end of the mapped area
 667 */
 668static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
 669                        size_t len, loff_t *pos)
 670{
 671        struct spu_context *ctx = file->private_data;
 672        u32 mbox_data, __user *udata;
 673        ssize_t count;
 674
 675        if (len < 4)
 676                return -EINVAL;
 677
 678        if (!access_ok(VERIFY_WRITE, buf, len))
 679                return -EFAULT;
 680
 681        udata = (void __user *)buf;
 682
 683        count = spu_acquire(ctx);
 684        if (count)
 685                return count;
 686
 687        for (count = 0; (count + 4) <= len; count += 4, udata++) {
 688                int ret;
 689                ret = ctx->ops->mbox_read(ctx, &mbox_data);
 690                if (ret == 0)
 691                        break;
 692
 693                /*
 694                 * at the end of the mapped area, we can fault
 695                 * but still need to return the data we have
 696                 * read successfully so far.
 697                 */
 698                ret = __put_user(mbox_data, udata);
 699                if (ret) {
 700                        if (!count)
 701                                count = -EFAULT;
 702                        break;
 703                }
 704        }
 705        spu_release(ctx);
 706
 707        if (!count)
 708                count = -EAGAIN;
 709
 710        return count;
 711}
 712
/* File operations for the read-only "mbox" (SPU-to-PPE mailbox) file. */
static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
};
 717
 718static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
 719                        size_t len, loff_t *pos)
 720{
 721        struct spu_context *ctx = file->private_data;
 722        ssize_t ret;
 723        u32 mbox_stat;
 724
 725        if (len < 4)
 726                return -EINVAL;
 727
 728        ret = spu_acquire(ctx);
 729        if (ret)
 730                return ret;
 731
 732        mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;
 733
 734        spu_release(ctx);
 735
 736        if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
 737                return -EFAULT;
 738
 739        return 4;
 740}
 741
/* File operations for the "mbox_stat" file. */
static const struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
};
 746
/* low-level ibox access function; returns the backend's read result
 * (non-static: also used as a wait condition by spufs_ibox_read below) */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}
 752
/* Register/unregister this file for SIGIO on ibox data arrival. */
static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}
 759
/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	/* the SPU may have been unbound from the context already */
	if (!ctx)
		return;

	/* wake blocked readers/pollers and deliver SIGIO to fasync owners */
	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}
 771
/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
		/* NOTE(review): on error this skips spu_release(), so
		 * spufs_wait presumably drops the context lock itself on
		 * failure — confirm against spufs_wait() in spufs.h */
		if (count)
			goto out;
	}

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out_unlock;

	/* drain further entries non-blocking; partial reads are fine */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}
 841
/* poll handler for the ibox file: readable when an ibox entry is pending. */
static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}
 859
/* File operations for the "ibox" (interrupting mailbox) file. */
static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
};
 866
 867static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
 868                        size_t len, loff_t *pos)
 869{
 870        struct spu_context *ctx = file->private_data;
 871        ssize_t ret;
 872        u32 ibox_stat;
 873
 874        if (len < 4)
 875                return -EINVAL;
 876
 877        ret = spu_acquire(ctx);
 878        if (ret)
 879                return ret;
 880        ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
 881        spu_release(ctx);
 882
 883        if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
 884                return -EFAULT;
 885
 886        return 4;
 887}
 888
/* File operations for the "ibox_stat" file. */
static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
};
 893
/* low-level mailbox write; returns the backend's write result
 * (non-static: also used as a wait condition by spufs_wbox_write below) */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}
 899
 900static int spufs_wbox_fasync(int fd, struct file *file, int on)
 901{
 902        struct spu_context *ctx = file->private_data;
 903        int ret;
 904
 905        ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);
 906
 907        return ret;
 908}
 909
/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	/* the SPU may have been unbound from the context already */
	if (!ctx)
		return;

	/* wake blocked writers/pollers and deliver SIGIO to fasync owners */
	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}
 921
/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	/* fetch the first word before taking the context lock */
	if (__get_user(wbox_data, udata))
		return -EFAULT;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
		/* NOTE(review): on error this skips spu_release(), so
		 * spufs_wait presumably drops the context lock itself on
		 * failure — confirm against spufs_wait() in spufs.h */
		if (count)
			goto out;
	}


	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}
 989
/* poll method for the wbox file: report writability of the mailbox. */
static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
        struct spu_context *ctx = file->private_data;
        unsigned int mask;

        poll_wait(file, &ctx->wbox_wq, wait);

        /*
         * For now keep this uninterruptible and also ignore the rule
         * that poll should not sleep.  Will be fixed later.
         */
        mutex_lock(&ctx->state_mutex);
        /* wbox status is part of the mbox status word; ask for write events */
        mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
        spu_release(ctx);

        return mask;
}
1007
/* "wbox" file: write-only access to the SPU write-outbound mailbox. */
static const struct file_operations spufs_wbox_fops = {
        .open   = spufs_pipe_open,
        .write  = spufs_wbox_write,
        .poll   = spufs_wbox_poll,
        .fasync = spufs_wbox_fasync,
};
1014
1015static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
1016                        size_t len, loff_t *pos)
1017{
1018        struct spu_context *ctx = file->private_data;
1019        ssize_t ret;
1020        u32 wbox_stat;
1021
1022        if (len < 4)
1023                return -EINVAL;
1024
1025        ret = spu_acquire(ctx);
1026        if (ret)
1027                return ret;
1028        wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
1029        spu_release(ctx);
1030
1031        if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
1032                return -EFAULT;
1033
1034        return 4;
1035}
1036
/* "wbox_stat" file: read-only wbox fill-level. */
static const struct file_operations spufs_wbox_stat_fops = {
        .open   = spufs_pipe_open,
        .read   = spufs_wbox_stat_read,
};
1041
1042static int spufs_signal1_open(struct inode *inode, struct file *file)
1043{
1044        struct spufs_inode_info *i = SPUFS_I(inode);
1045        struct spu_context *ctx = i->i_ctx;
1046
1047        mutex_lock(&ctx->mapping_lock);
1048        file->private_data = ctx;
1049        if (!i->i_openers++)
1050                ctx->signal1 = inode->i_mapping;
1051        mutex_unlock(&ctx->mapping_lock);
1052        return nonseekable_open(inode, file);
1053}
1054
1055static int
1056spufs_signal1_release(struct inode *inode, struct file *file)
1057{
1058        struct spufs_inode_info *i = SPUFS_I(inode);
1059        struct spu_context *ctx = i->i_ctx;
1060
1061        mutex_lock(&ctx->mapping_lock);
1062        if (!--i->i_openers)
1063                ctx->signal1 = NULL;
1064        mutex_unlock(&ctx->mapping_lock);
1065        return 0;
1066}
1067
1068static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
1069                        size_t len, loff_t *pos)
1070{
1071        int ret = 0;
1072        u32 data;
1073
1074        if (len < 4)
1075                return -EINVAL;
1076
1077        if (ctx->csa.spu_chnlcnt_RW[3]) {
1078                data = ctx->csa.spu_chnldata_RW[3];
1079                ret = 4;
1080        }
1081
1082        if (!ret)
1083                goto out;
1084
1085        if (copy_to_user(buf, &data, 4))
1086                return -EFAULT;
1087
1088out:
1089        return ret;
1090}
1091
1092static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
1093                        size_t len, loff_t *pos)
1094{
1095        int ret;
1096        struct spu_context *ctx = file->private_data;
1097
1098        ret = spu_acquire_saved(ctx);
1099        if (ret)
1100                return ret;
1101        ret = __spufs_signal1_read(ctx, buf, len, pos);
1102        spu_release_saved(ctx);
1103
1104        return ret;
1105}
1106
1107static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
1108                        size_t len, loff_t *pos)
1109{
1110        struct spu_context *ctx;
1111        ssize_t ret;
1112        u32 data;
1113
1114        ctx = file->private_data;
1115
1116        if (len < 4)
1117                return -EINVAL;
1118
1119        if (copy_from_user(&data, buf, 4))
1120                return -EFAULT;
1121
1122        ret = spu_acquire(ctx);
1123        if (ret)
1124                return ret;
1125        ctx->ops->signal1_write(ctx, data);
1126        spu_release(ctx);
1127
1128        return 4;
1129}
1130
/* Fault handler for mmap'ed signal1 problem-state area. */
static int
spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
        /* 4k pages: map only the signal1 page at ps offset 0x14000 */
        return spufs_ps_fault(vma, vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
        /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
         * signal 1 and 2 area
         */
        return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}

/* vm ops for signal1 mappings: populate pages on fault. */
static const struct vm_operations_struct spufs_signal1_mmap_vmops = {
        .fault = spufs_signal1_mmap_fault,
};
1149
1150static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
1151{
1152        if (!(vma->vm_flags & VM_SHARED))
1153                return -EINVAL;
1154
1155        vma->vm_flags |= VM_IO | VM_PFNMAP;
1156        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1157
1158        vma->vm_ops = &spufs_signal1_mmap_vmops;
1159        return 0;
1160}
1161
/* "signal1" file for schedulable contexts: read, write and mmap. */
static const struct file_operations spufs_signal1_fops = {
        .open = spufs_signal1_open,
        .release = spufs_signal1_release,
        .read = spufs_signal1_read,
        .write = spufs_signal1_write,
        .mmap = spufs_signal1_mmap,
};

/* nosched variant: no read method (reading would require a save). */
static const struct file_operations spufs_signal1_nosched_fops = {
        .open = spufs_signal1_open,
        .release = spufs_signal1_release,
        .write = spufs_signal1_write,
        .mmap = spufs_signal1_mmap,
};
1176
1177static int spufs_signal2_open(struct inode *inode, struct file *file)
1178{
1179        struct spufs_inode_info *i = SPUFS_I(inode);
1180        struct spu_context *ctx = i->i_ctx;
1181
1182        mutex_lock(&ctx->mapping_lock);
1183        file->private_data = ctx;
1184        if (!i->i_openers++)
1185                ctx->signal2 = inode->i_mapping;
1186        mutex_unlock(&ctx->mapping_lock);
1187        return nonseekable_open(inode, file);
1188}
1189
1190static int
1191spufs_signal2_release(struct inode *inode, struct file *file)
1192{
1193        struct spufs_inode_info *i = SPUFS_I(inode);
1194        struct spu_context *ctx = i->i_ctx;
1195
1196        mutex_lock(&ctx->mapping_lock);
1197        if (!--i->i_openers)
1198                ctx->signal2 = NULL;
1199        mutex_unlock(&ctx->mapping_lock);
1200        return 0;
1201}
1202
1203static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
1204                        size_t len, loff_t *pos)
1205{
1206        int ret = 0;
1207        u32 data;
1208
1209        if (len < 4)
1210                return -EINVAL;
1211
1212        if (ctx->csa.spu_chnlcnt_RW[4]) {
1213                data =  ctx->csa.spu_chnldata_RW[4];
1214                ret = 4;
1215        }
1216
1217        if (!ret)
1218                goto out;
1219
1220        if (copy_to_user(buf, &data, 4))
1221                return -EFAULT;
1222
1223out:
1224        return ret;
1225}
1226
1227static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
1228                        size_t len, loff_t *pos)
1229{
1230        struct spu_context *ctx = file->private_data;
1231        int ret;
1232
1233        ret = spu_acquire_saved(ctx);
1234        if (ret)
1235                return ret;
1236        ret = __spufs_signal2_read(ctx, buf, len, pos);
1237        spu_release_saved(ctx);
1238
1239        return ret;
1240}
1241
1242static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
1243                        size_t len, loff_t *pos)
1244{
1245        struct spu_context *ctx;
1246        ssize_t ret;
1247        u32 data;
1248
1249        ctx = file->private_data;
1250
1251        if (len < 4)
1252                return -EINVAL;
1253
1254        if (copy_from_user(&data, buf, 4))
1255                return -EFAULT;
1256
1257        ret = spu_acquire(ctx);
1258        if (ret)
1259                return ret;
1260        ctx->ops->signal2_write(ctx, data);
1261        spu_release(ctx);
1262
1263        return 4;
1264}
1265
#if SPUFS_MMAP_4K
/* Fault handler for mmap'ed signal2 problem-state area (4k pages only). */
static int
spufs_signal2_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
        /* 4k pages: map only the signal2 page at ps offset 0x1c000 */
        return spufs_ps_fault(vma, vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
        /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
         * signal 1 and 2 area
         */
        return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}

/* vm ops for signal2 mappings: populate pages on fault. */
static const struct vm_operations_struct spufs_signal2_mmap_vmops = {
        .fault = spufs_signal2_mmap_fault,
};

/* mmap method for signal2: uncached, fault-populated PFN mapping. */
static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        vma->vm_ops = &spufs_signal2_mmap_vmops;
        return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */
1300
/* "signal2" file for schedulable contexts: read, write and mmap. */
static const struct file_operations spufs_signal2_fops = {
        .open = spufs_signal2_open,
        .release = spufs_signal2_release,
        .read = spufs_signal2_read,
        .write = spufs_signal2_write,
        .mmap = spufs_signal2_mmap,
};

/* nosched variant: no read method (reading would require a save). */
static const struct file_operations spufs_signal2_nosched_fops = {
        .open = spufs_signal2_open,
        .release = spufs_signal2_release,
        .write = spufs_signal2_write,
        .mmap = spufs_signal2_mmap,
};
1315
1316/*
1317 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
1318 * work of acquiring (or not) the SPU context before calling through
1319 * to the actual get routine. The set routine is called directly.
1320 */
1321#define SPU_ATTR_NOACQUIRE      0
1322#define SPU_ATTR_ACQUIRE        1
1323#define SPU_ATTR_ACQUIRE_SAVED  2
1324
1325#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)  \
1326static int __##__get(void *data, u64 *val)                              \
1327{                                                                       \
1328        struct spu_context *ctx = data;                                 \
1329        int ret = 0;                                                    \
1330                                                                        \
1331        if (__acquire == SPU_ATTR_ACQUIRE) {                            \
1332                ret = spu_acquire(ctx);                                 \
1333                if (ret)                                                \
1334                        return ret;                                     \
1335                *val = __get(ctx);                                      \
1336                spu_release(ctx);                                       \
1337        } else if (__acquire == SPU_ATTR_ACQUIRE_SAVED) {               \
1338                ret = spu_acquire_saved(ctx);                           \
1339                if (ret)                                                \
1340                        return ret;                                     \
1341                *val = __get(ctx);                                      \
1342                spu_release_saved(ctx);                                 \
1343        } else                                                          \
1344                *val = __get(ctx);                                      \
1345                                                                        \
1346        return 0;                                                       \
1347}                                                                       \
1348DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
1349
1350static int spufs_signal1_type_set(void *data, u64 val)
1351{
1352        struct spu_context *ctx = data;
1353        int ret;
1354
1355        ret = spu_acquire(ctx);
1356        if (ret)
1357                return ret;
1358        ctx->ops->signal1_type_set(ctx, val);
1359        spu_release(ctx);
1360
1361        return 0;
1362}
1363
/* Attribute getter; locking is provided by the DEFINE_SPUFS_ATTRIBUTE
 * wrapper below (SPU_ATTR_ACQUIRE). */
static u64 spufs_signal1_type_get(struct spu_context *ctx)
{
        return ctx->ops->signal1_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
                       spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
1370
1371
1372static int spufs_signal2_type_set(void *data, u64 val)
1373{
1374        struct spu_context *ctx = data;
1375        int ret;
1376
1377        ret = spu_acquire(ctx);
1378        if (ret)
1379                return ret;
1380        ctx->ops->signal2_type_set(ctx, val);
1381        spu_release(ctx);
1382
1383        return 0;
1384}
1385
/* Attribute getter; locking is provided by the DEFINE_SPUFS_ATTRIBUTE
 * wrapper below (SPU_ATTR_ACQUIRE). */
static u64 spufs_signal2_type_get(struct spu_context *ctx)
{
        return ctx->ops->signal2_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
                       spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
1392
#if SPUFS_MMAP_4K
/* Fault handler for mmap'ed MSS problem-state area (4k pages only). */
static int
spufs_mss_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
}

/* vm ops for mss mappings: populate pages on fault. */
static const struct vm_operations_struct spufs_mss_mmap_vmops = {
        .fault = spufs_mss_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        vma->vm_ops = &spufs_mss_mmap_vmops;
        return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */
1421
1422static int spufs_mss_open(struct inode *inode, struct file *file)
1423{
1424        struct spufs_inode_info *i = SPUFS_I(inode);
1425        struct spu_context *ctx = i->i_ctx;
1426
1427        file->private_data = i->i_ctx;
1428
1429        mutex_lock(&ctx->mapping_lock);
1430        if (!i->i_openers++)
1431                ctx->mss = inode->i_mapping;
1432        mutex_unlock(&ctx->mapping_lock);
1433        return nonseekable_open(inode, file);
1434}
1435
1436static int
1437spufs_mss_release(struct inode *inode, struct file *file)
1438{
1439        struct spufs_inode_info *i = SPUFS_I(inode);
1440        struct spu_context *ctx = i->i_ctx;
1441
1442        mutex_lock(&ctx->mapping_lock);
1443        if (!--i->i_openers)
1444                ctx->mss = NULL;
1445        mutex_unlock(&ctx->mapping_lock);
1446        return 0;
1447}
1448
/* "mss" file: mmap-only access to the MFC problem-state DMA area. */
static const struct file_operations spufs_mss_fops = {
        .open    = spufs_mss_open,
        .release = spufs_mss_release,
        .mmap    = spufs_mss_mmap,
};
1454
/* Fault handler for the full problem-state mapping. */
static int
spufs_psmap_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE);
}

/* vm ops for psmap mappings: populate pages on fault. */
static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
        .fault = spufs_psmap_mmap_fault,
};
1464
1465/*
1466 * mmap support for full problem state area [0x00000 - 0x1ffff].
1467 */
1468static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
1469{
1470        if (!(vma->vm_flags & VM_SHARED))
1471                return -EINVAL;
1472
1473        vma->vm_flags |= VM_IO | VM_PFNMAP;
1474        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1475
1476        vma->vm_ops = &spufs_psmap_mmap_vmops;
1477        return 0;
1478}
1479
1480static int spufs_psmap_open(struct inode *inode, struct file *file)
1481{
1482        struct spufs_inode_info *i = SPUFS_I(inode);
1483        struct spu_context *ctx = i->i_ctx;
1484
1485        mutex_lock(&ctx->mapping_lock);
1486        file->private_data = i->i_ctx;
1487        if (!i->i_openers++)
1488                ctx->psmap = inode->i_mapping;
1489        mutex_unlock(&ctx->mapping_lock);
1490        return nonseekable_open(inode, file);
1491}
1492
1493static int
1494spufs_psmap_release(struct inode *inode, struct file *file)
1495{
1496        struct spufs_inode_info *i = SPUFS_I(inode);
1497        struct spu_context *ctx = i->i_ctx;
1498
1499        mutex_lock(&ctx->mapping_lock);
1500        if (!--i->i_openers)
1501                ctx->psmap = NULL;
1502        mutex_unlock(&ctx->mapping_lock);
1503        return 0;
1504}
1505
/* "psmap" file: mmap-only access to the whole problem-state area. */
static const struct file_operations spufs_psmap_fops = {
        .open    = spufs_psmap_open,
        .release = spufs_psmap_release,
        .mmap    = spufs_psmap_mmap,
};
1511
1512
#if SPUFS_MMAP_4K
/* Fault handler for mmap'ed MFC command area (4k pages only). */
static int
spufs_mfc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
}

/* vm ops for mfc mappings: populate pages on fault. */
static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
        .fault = spufs_mfc_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        vma->vm_ops = &spufs_mfc_mmap_vmops;
        return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */
1541
/*
 * Open the mfc file.  Only the owning process may open it, and only
 * once: user-initiated DMA must not race with other users of the inode.
 */
static int spufs_mfc_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        /* we don't want to deal with DMA into other processes */
        if (ctx->owner != current->mm)
                return -EINVAL;

        /* refuse when anything else already references this inode */
        if (atomic_read(&inode->i_count) != 1)
                return -EBUSY;

        mutex_lock(&ctx->mapping_lock);
        file->private_data = ctx;
        if (!i->i_openers++)
                ctx->mfc = inode->i_mapping;
        mutex_unlock(&ctx->mapping_lock);
        return nonseekable_open(inode, file);
}
1561
1562static int
1563spufs_mfc_release(struct inode *inode, struct file *file)
1564{
1565        struct spufs_inode_info *i = SPUFS_I(inode);
1566        struct spu_context *ctx = i->i_ctx;
1567
1568        mutex_lock(&ctx->mapping_lock);
1569        if (!--i->i_openers)
1570                ctx->mfc = NULL;
1571        mutex_unlock(&ctx->mapping_lock);
1572        return 0;
1573}
1574
/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
        struct spu_context *ctx = spu->ctx;

        /* The SPU may already have been unbound from its context. */
        if (!ctx)
                return;

        wake_up_all(&ctx->mfc_wq);

        pr_debug("%s %s\n", __func__, spu->name);
        if (ctx->mfc_fasync) {
                u32 free_elements, tagstatus;
                unsigned int mask;

                /* no need for spu_acquire in interrupt context */
                free_elements = ctx->ops->get_mfc_free_elements(ctx);
                tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

                /* translate MFC state into poll-style events for SIGIO */
                mask = 0;
                if (free_elements & 0xffff)
                        mask |= POLLOUT;
                if (tagstatus & ctx->tagwait)
                        mask |= POLLIN;

                kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
        }
}
1603
/*
 * Condition helper for spufs_wait in spufs_mfc_read: returns 1 (and
 * fills *status) when a waited-on tag group has completed, 0 otherwise
 * after re-arming the tag-group completion interrupt.
 */
static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
        /* See if there is one tag group is complete */
        /* FIXME we need locking around tagwait */
        *status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
        ctx->tagwait &= ~*status;
        if (*status)
                return 1;

        /* enable interrupt waiting for any tag group,
           may silently fail if interrupts are already enabled */
        ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
        return 0;
}
1618
1619static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
1620                        size_t size, loff_t *pos)
1621{
1622        struct spu_context *ctx = file->private_data;
1623        int ret = -EINVAL;
1624        u32 status;
1625
1626        if (size != 4)
1627                goto out;
1628
1629        ret = spu_acquire(ctx);
1630        if (ret)
1631                return ret;
1632
1633        ret = -EINVAL;
1634        if (file->f_flags & O_NONBLOCK) {
1635                status = ctx->ops->read_mfc_tagstatus(ctx);
1636                if (!(status & ctx->tagwait))
1637                        ret = -EAGAIN;
1638                else
1639                        /* XXX(hch): shouldn't we clear ret here? */
1640                        ctx->tagwait &= ~status;
1641        } else {
1642                ret = spufs_wait(ctx->mfc_wq,
1643                           spufs_read_mfc_tagstatus(ctx, &status));
1644                if (ret)
1645                        goto out;
1646        }
1647        spu_release(ctx);
1648
1649        ret = 4;
1650        if (copy_to_user(buffer, &status, 4))
1651                ret = -EFAULT;
1652
1653out:
1654        return ret;
1655}
1656
1657static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
1658{
1659        pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa,
1660                 cmd->ea, cmd->size, cmd->tag, cmd->cmd);
1661
1662        switch (cmd->cmd) {
1663        case MFC_PUT_CMD:
1664        case MFC_PUTF_CMD:
1665        case MFC_PUTB_CMD:
1666        case MFC_GET_CMD:
1667        case MFC_GETF_CMD:
1668        case MFC_GETB_CMD:
1669                break;
1670        default:
1671                pr_debug("invalid DMA opcode %x\n", cmd->cmd);
1672                return -EIO;
1673        }
1674
1675        if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) {
1676                pr_debug("invalid DMA alignment, ea %llx lsa %x\n",
1677                                cmd->ea, cmd->lsa);
1678                return -EIO;
1679        }
1680
1681        switch (cmd->size & 0xf) {
1682        case 1:
1683                break;
1684        case 2:
1685                if (cmd->lsa & 1)
1686                        goto error;
1687                break;
1688        case 4:
1689                if (cmd->lsa & 3)
1690                        goto error;
1691                break;
1692        case 8:
1693                if (cmd->lsa & 7)
1694                        goto error;
1695                break;
1696        case 0:
1697                if (cmd->lsa & 15)
1698                        goto error;
1699                break;
1700        error:
1701        default:
1702                pr_debug("invalid DMA alignment %x for size %x\n",
1703                        cmd->lsa & 0xf, cmd->size);
1704                return -EIO;
1705        }
1706
1707        if (cmd->size > 16 * 1024) {
1708                pr_debug("invalid DMA size %x\n", cmd->size);
1709                return -EIO;
1710        }
1711
1712        if (cmd->tag & 0xfff0) {
1713                /* we reserve the higher tag numbers for kernel use */
1714                pr_debug("invalid DMA tag\n");
1715                return -EIO;
1716        }
1717
1718        if (cmd->class) {
1719                /* not supported in this version */
1720                pr_debug("invalid DMA class\n");
1721                return -EIO;
1722        }
1723
1724        return 0;
1725}
1726
/*
 * Condition helper for spufs_wait in spufs_mfc_write: try to enqueue
 * the command, retrying once after re-arming the tag-group interrupt.
 * Returns 1 when done (result in *error), 0 to keep waiting.
 */
static int spu_send_mfc_command(struct spu_context *ctx,
                                struct mfc_dma_command cmd,
                                int *error)
{
        *error = ctx->ops->send_mfc_command(ctx, &cmd);
        if (*error == -EAGAIN) {
                /* wait for any tag group to complete
                   so we have space for the new command */
                ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
                /* try again, because the queue might be
                   empty again */
                *error = ctx->ops->send_mfc_command(ctx, &cmd);
                if (*error == -EAGAIN)
                        return 0;
        }
        return 1;
}
1744
/*
 * Queue one MFC DMA command from user space.  The write must be exactly
 * one struct mfc_dma_command; on success the command's tag is added to
 * the set of tags we wait on and the full size is returned.
 */
static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
                        size_t size, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        struct mfc_dma_command cmd;
        int ret = -EINVAL;

        if (size != sizeof cmd)
                goto out;

        ret = -EFAULT;
        if (copy_from_user(&cmd, buffer, sizeof cmd))
                goto out;

        /* validate before touching any hardware state */
        ret = spufs_check_valid_dma(&cmd);
        if (ret)
                goto out;

        ret = spu_acquire(ctx);
        if (ret)
                goto out;

        /* NOTE(review): on spufs_wait failure we goto out, not
         * out_unlock — spufs_wait drops the context lock on failure. */
        ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
        if (ret)
                goto out;

        if (file->f_flags & O_NONBLOCK) {
                ret = ctx->ops->send_mfc_command(ctx, &cmd);
        } else {
                int status;
                ret = spufs_wait(ctx->mfc_wq,
                                 spu_send_mfc_command(ctx, cmd, &status));
                if (ret)
                        goto out;
                if (status)
                        ret = status;
        }

        if (ret)
                goto out_unlock;

        /* remember this tag so reads/polls can wait for its completion */
        ctx->tagwait |= 1 << cmd.tag;
        ret = size;

out_unlock:
        spu_release(ctx);
out:
        return ret;
}
1794
/* poll method for the mfc file: POLLOUT when the command queue has
 * room, POLLIN when a waited-on tag group has completed. */
static unsigned int spufs_mfc_poll(struct file *file,poll_table *wait)
{
        struct spu_context *ctx = file->private_data;
        u32 free_elements, tagstatus;
        unsigned int mask;

        poll_wait(file, &ctx->mfc_wq, wait);

        /*
         * For now keep this uninterruptible and also ignore the rule
         * that poll should not sleep.  Will be fixed later.
         */
        mutex_lock(&ctx->state_mutex);
        /* mode 2: arm completion reporting for all waited tags */
        ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
        free_elements = ctx->ops->get_mfc_free_elements(ctx);
        tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
        spu_release(ctx);

        mask = 0;
        if (free_elements & 0xffff)
                mask |= POLLOUT | POLLWRNORM;
        if (tagstatus & ctx->tagwait)
                mask |= POLLIN | POLLRDNORM;

        pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,
                free_elements, tagstatus, ctx->tagwait);

        return mask;
}
1824
/*
 * flush method for the mfc file.  The intended behaviour (wait for all
 * outstanding tag groups to complete) is compiled out because it hangs;
 * currently this only acquires and releases the context.
 */
static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        ret = spu_acquire(ctx);
        if (ret)
                goto out;
#if 0
/* this currently hangs */
        ret = spufs_wait(ctx->mfc_wq,
                         ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
        if (ret)
                goto out;
        ret = spufs_wait(ctx->mfc_wq,
                         ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
        if (ret)
                goto out;
#else
        ret = 0;
#endif
        spu_release(ctx);
out:
        return ret;
}
1850
/* fsync for the mfc file simply delegates to flush (no dirty state). */
static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
                           int datasync)
{
        return spufs_mfc_flush(file, NULL);
}
1856
/* Register/unregister for SIGIO on MFC queue-space / tag completion. */
static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
        struct spu_context *ctx = file->private_data;

        return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}
1863
/* "mfc" file: user-initiated DMA command queueing and status. */
static const struct file_operations spufs_mfc_fops = {
        .open    = spufs_mfc_open,
        .release = spufs_mfc_release,
        .read    = spufs_mfc_read,
        .write   = spufs_mfc_write,
        .poll    = spufs_mfc_poll,
        .flush   = spufs_mfc_flush,
        .fsync   = spufs_mfc_fsync,
        .fasync  = spufs_mfc_fasync,
        .mmap    = spufs_mfc_mmap,
};
1875
1876static int spufs_npc_set(void *data, u64 val)
1877{
1878        struct spu_context *ctx = data;
1879        int ret;
1880
1881        ret = spu_acquire(ctx);
1882        if (ret)
1883                return ret;
1884        ctx->ops->npc_write(ctx, val);
1885        spu_release(ctx);
1886
1887        return 0;
1888}
1889
/* Attribute getter; locking via DEFINE_SPUFS_ATTRIBUTE (SPU_ATTR_ACQUIRE). */
static u64 spufs_npc_get(struct spu_context *ctx)
{
        return ctx->ops->npc_read(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
                       "0x%llx\n", SPU_ATTR_ACQUIRE);
1896
1897static int spufs_decr_set(void *data, u64 val)
1898{
1899        struct spu_context *ctx = data;
1900        struct spu_lscsa *lscsa = ctx->csa.lscsa;
1901        int ret;
1902
1903        ret = spu_acquire_saved(ctx);
1904        if (ret)
1905                return ret;
1906        lscsa->decr.slot[0] = (u32) val;
1907        spu_release_saved(ctx);
1908
1909        return 0;
1910}
1911
/* Attribute getter; saved-state locking via DEFINE_SPUFS_ATTRIBUTE. */
static u64 spufs_decr_get(struct spu_context *ctx)
{
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        return lscsa->decr.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
                       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);
1919
1920static int spufs_decr_status_set(void *data, u64 val)
1921{
1922        struct spu_context *ctx = data;
1923        int ret;
1924
1925        ret = spu_acquire_saved(ctx);
1926        if (ret)
1927                return ret;
1928        if (val)
1929                ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
1930        else
1931                ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
1932        spu_release_saved(ctx);
1933
1934        return 0;
1935}
1936
/* Attribute getter; saved-state locking via DEFINE_SPUFS_ATTRIBUTE. */
static u64 spufs_decr_status_get(struct spu_context *ctx)
{
        if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
                return SPU_DECR_STATUS_RUNNING;
        else
                return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
                       spufs_decr_status_set, "0x%llx\n",
                       SPU_ATTR_ACQUIRE_SAVED);
1947
/* Attribute setter for "event_mask": store the (32-bit) event mask into
 * the local-store context save area while the context is held saved. */
static int spufs_event_mask_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        lscsa->event_mask.slot[0] = (u32) val;
        spu_release_saved(ctx);

        return 0;
}
1962
/* Attribute getter for "event_mask"; framework acquires the saved context. */
static u64 spufs_event_mask_get(struct spu_context *ctx)
{
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        return lscsa->event_mask.slot[0];
}

DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
                       spufs_event_mask_set, "0x%llx\n",
                       SPU_ATTR_ACQUIRE_SAVED);
1972
/* Attribute getter for "event_status" (read-only): return the saved data
 * of SPU channel 0, or 0 when the saved channel count shows nothing
 * pending. */
static u64 spufs_event_status_get(struct spu_context *ctx)
{
        struct spu_state *state = &ctx->csa;
        u64 stat;
        stat = state->spu_chnlcnt_RW[0];
        if (stat)
                return state->spu_chnldata_RW[0];
        return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
                       NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
1984
/* Attribute setter for "srr0": store the (32-bit) interrupt return
 * address into the local-store save area while the context is saved. */
static int spufs_srr0_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        lscsa->srr0.slot[0] = (u32) val;
        spu_release_saved(ctx);

        return 0;
}
1999
/* Attribute getter for "srr0"; framework acquires the saved context. */
static u64 spufs_srr0_get(struct spu_context *ctx)
{
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        return lscsa->srr0.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
                       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
2007
/* Attribute getter for "phys-id" (read-only): the physical SPU number
 * while the context is loaded, 0xffffffff otherwise. */
static u64 spufs_id_get(struct spu_context *ctx)
{
        u64 num;

        if (ctx->state == SPU_STATE_RUNNABLE)
                num = ctx->spu->number;
        else
                num = (unsigned int)-1;

        return num;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
                       SPU_ATTR_ACQUIRE)
2021
/* Attribute getter for "object-id": an opaque cookie stored on behalf of
 * user space; not interpreted by the kernel here.  Presumably consumed by
 * debug tooling — confirm with callers before changing semantics. */
static u64 spufs_object_id_get(struct spu_context *ctx)
{
        /* FIXME: Should there really be no locking here? */
        return ctx->object_id;
}

/* Attribute setter for "object-id": plain store, intentionally without
 * taking the context lock (SPU_ATTR_NOACQUIRE below). */
static int spufs_object_id_set(void *data, u64 id)
{
        struct spu_context *ctx = data;
        ctx->object_id = id;

        return 0;
}

DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
                       spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);
2038
/* Attribute getter for "lslr" (read-only): the saved local-store limit
 * register. */
static u64 spufs_lslr_get(struct spu_context *ctx)
{
        return ctx->csa.priv2.spu_lslr_RW;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
                       SPU_ATTR_ACQUIRE_SAVED);
2045
2046static int spufs_info_open(struct inode *inode, struct file *file)
2047{
2048        struct spufs_inode_info *i = SPUFS_I(inode);
2049        struct spu_context *ctx = i->i_ctx;
2050        file->private_data = ctx;
2051        return 0;
2052}
2053
2054static int spufs_caps_show(struct seq_file *s, void *private)
2055{
2056        struct spu_context *ctx = s->private;
2057
2058        if (!(ctx->flags & SPU_CREATE_NOSCHED))
2059                seq_puts(s, "sched\n");
2060        if (!(ctx->flags & SPU_CREATE_ISOLATE))
2061                seq_puts(s, "step\n");
2062        return 0;
2063}
2064
2065static int spufs_caps_open(struct inode *inode, struct file *file)
2066{
2067        return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
2068}
2069
/* File operations for the "capabilities" file (seq_file based). */
static const struct file_operations spufs_caps_fops = {
        .open           = spufs_caps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
2076
/*
 * Copy the saved PU mailbox word to user space.  Returns 0 (EOF) when
 * the low byte of the saved mb_stat_R shows the mailbox is empty.
 * Caller holds the saved context and csa.register_lock.
 */
static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
                        char __user *buf, size_t len, loff_t *pos)
{
        u32 data;

        /* EOF if there's no entry in the mbox */
        if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
                return 0;

        data = ctx->csa.prob.pu_mb_R;

        return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}
2090
/*
 * read() handler for "mbox_info": dump the saved mailbox state.  The
 * context is forced into the saved state and csa.register_lock is taken
 * around the actual copy.
 */
static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
                                   size_t len, loff_t *pos)
{
        int ret;
        struct spu_context *ctx = file->private_data;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        spin_lock(&ctx->csa.register_lock);
        ret = __spufs_mbox_info_read(ctx, buf, len, pos);
        spin_unlock(&ctx->csa.register_lock);
        spu_release_saved(ctx);

        return ret;
}
2110
/* File operations for the "mbox_info" file. */
static const struct file_operations spufs_mbox_info_fops = {
        .open = spufs_info_open,
        .read = spufs_mbox_info_read,
        .llseek  = generic_file_llseek,
};
2116
/*
 * Copy the saved interrupting (PU) mailbox word to user space.  Returns
 * 0 (EOF) when bits 16..23 of the saved mb_stat_R show the ibox is
 * empty.  Caller holds the saved context and csa.register_lock.
 */
static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
                                char __user *buf, size_t len, loff_t *pos)
{
        u32 data;

        /* EOF if there's no entry in the ibox */
        if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
                return 0;

        data = ctx->csa.priv2.puint_mb_R;

        return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}
2130
/*
 * read() handler for "ibox_info": dump the saved interrupting mailbox
 * state under the saved context and csa.register_lock.
 */
static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
                                   size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        spin_lock(&ctx->csa.register_lock);
        ret = __spufs_ibox_info_read(ctx, buf, len, pos);
        spin_unlock(&ctx->csa.register_lock);
        spu_release_saved(ctx);

        return ret;
}
2150
/* File operations for the "ibox_info" file. */
static const struct file_operations spufs_ibox_info_fops = {
        .open = spufs_info_open,
        .read = spufs_ibox_info_read,
        .llseek  = generic_file_llseek,
};
2156
/*
 * Copy the saved SPU write-mailbox queue entries to user space.  Bits
 * 8..15 of the saved mb_stat_R apparently hold the free-slot count of
 * the 4-deep wbox, so 4 minus that value is the number of valid
 * entries.  Caller holds the saved context and csa.register_lock.
 */
static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
                        char __user *buf, size_t len, loff_t *pos)
{
        int i, cnt;
        u32 data[4];
        u32 wbox_stat;

        wbox_stat = ctx->csa.prob.mb_stat_R;
        cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
        for (i = 0; i < cnt; i++) {
                data[i] = ctx->csa.spu_mailbox_data[i];
        }

        return simple_read_from_buffer(buf, len, pos, &data,
                                cnt * sizeof(u32));
}
2173
/*
 * read() handler for "wbox_info": dump the saved write-mailbox queue
 * under the saved context and csa.register_lock.
 */
static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
                                   size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        spin_lock(&ctx->csa.register_lock);
        ret = __spufs_wbox_info_read(ctx, buf, len, pos);
        spin_unlock(&ctx->csa.register_lock);
        spu_release_saved(ctx);

        return ret;
}
2193
/* File operations for the "wbox_info" file. */
static const struct file_operations spufs_wbox_info_fops = {
        .open = spufs_info_open,
        .read = spufs_wbox_info_read,
        .llseek  = generic_file_llseek,
};
2199
2200static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
2201                        char __user *buf, size_t len, loff_t *pos)
2202{
2203        struct spu_dma_info info;
2204        struct mfc_cq_sr *qp, *spuqp;
2205        int i;
2206
2207        info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
2208        info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
2209        info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
2210        info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
2211        info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
2212        for (i = 0; i < 16; i++) {
2213                qp = &info.dma_info_command_data[i];
2214                spuqp = &ctx->csa.priv2.spuq[i];
2215
2216                qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
2217                qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
2218                qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
2219                qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
2220        }
2221
2222        return simple_read_from_buffer(buf, len, pos, &info,
2223                                sizeof info);
2224}
2225
/*
 * read() handler for "dma_info": dump a snapshot of the saved MFC DMA
 * state under the saved context and csa.register_lock.
 */
static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
                              size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        spin_lock(&ctx->csa.register_lock);
        ret = __spufs_dma_info_read(ctx, buf, len, pos);
        spin_unlock(&ctx->csa.register_lock);
        spu_release_saved(ctx);

        return ret;
}
2245
2246static const struct file_operations spufs_dma_info_fops = {
2247        .open = spufs_info_open,
2248        .read = spufs_dma_info_read,
2249};
2250
2251static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
2252                        char __user *buf, size_t len, loff_t *pos)
2253{
2254        struct spu_proxydma_info info;
2255        struct mfc_cq_sr *qp, *puqp;
2256        int ret = sizeof info;
2257        int i;
2258
2259        if (len < ret)
2260                return -EINVAL;
2261
2262        if (!access_ok(VERIFY_WRITE, buf, len))
2263                return -EFAULT;
2264
2265        info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
2266        info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
2267        info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
2268        for (i = 0; i < 8; i++) {
2269                qp = &info.proxydma_info_command_data[i];
2270                puqp = &ctx->csa.priv2.puq[i];
2271
2272                qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
2273                qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
2274                qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
2275                qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
2276        }
2277
2278        return simple_read_from_buffer(buf, len, pos, &info,
2279                                sizeof info);
2280}
2281
/*
 * read() handler for "proxydma_info": dump a snapshot of the saved proxy
 * DMA state under the saved context and csa.register_lock.  Buffer
 * validation happens inside __spufs_proxydma_info_read().
 */
static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
                                   size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        spin_lock(&ctx->csa.register_lock);
        ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
        spin_unlock(&ctx->csa.register_lock);
        spu_release_saved(ctx);

        return ret;
}
2298
2299static const struct file_operations spufs_proxydma_info_fops = {
2300        .open = spufs_info_open,
2301        .read = spufs_proxydma_info_read,
2302};
2303
2304static int spufs_show_tid(struct seq_file *s, void *private)
2305{
2306        struct spu_context *ctx = s->private;
2307
2308        seq_printf(s, "%d\n", ctx->tid);
2309        return 0;
2310}
2311
2312static int spufs_tid_open(struct inode *inode, struct file *file)
2313{
2314        return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
2315}
2316
/* File operations for the "tid" file (seq_file based). */
static const struct file_operations spufs_tid_fops = {
        .open           = spufs_tid_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
2323
/* Printable names indexed by enum spu_utilization_state, used by
 * spufs_show_stat(). */
static const char *ctx_state_names[] = {
        "user", "system", "iowait", "loaded"
};
2327
/*
 * Return the time (in milliseconds) this context has accumulated in
 * @state, including the not-yet-recorded delta when the context is
 * currently loaded and still in that state.
 */
static unsigned long long spufs_acct_time(struct spu_context *ctx,
                enum spu_utilization_state state)
{
        struct timespec ts;
        unsigned long long time = ctx->stats.times[state];

        /*
         * In general, utilization statistics are updated by the controlling
         * thread as the spu context moves through various well defined
         * state transitions, but if the context is lazily loaded its
         * utilization statistics are not updated as the controlling thread
         * is not tightly coupled with the execution of the spu context.  We
         * calculate and apply the time delta from the last recorded state
         * of the spu context.
         */
        if (ctx->spu && ctx->stats.util_state == state) {
                ktime_get_ts(&ts);
                time += timespec_to_ns(&ts) - ctx->stats.tstamp;
        }

        return time / NSEC_PER_MSEC;
}
2350
2351static unsigned long long spufs_slb_flts(struct spu_context *ctx)
2352{
2353        unsigned long long slb_flts = ctx->stats.slb_flt;
2354
2355        if (ctx->state == SPU_STATE_RUNNABLE) {
2356                slb_flts += (ctx->spu->stats.slb_flt -
2357                             ctx->stats.slb_flt_base);
2358        }
2359
2360        return slb_flts;
2361}
2362
2363static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
2364{
2365        unsigned long long class2_intrs = ctx->stats.class2_intr;
2366
2367        if (ctx->state == SPU_STATE_RUNNABLE) {
2368                class2_intrs += (ctx->spu->stats.class2_intr -
2369                                 ctx->stats.class2_intr_base);
2370        }
2371
2372        return class2_intrs;
2373}
2374
2375
/*
 * seq_file show routine for "stat": one line of per-context accounting —
 * utilization state name, per-state times in milliseconds, voluntary and
 * involuntary context switches, fault/interrupt counters and the
 * libassist count — all gathered under the context lock.
 */
static int spufs_show_stat(struct seq_file *s, void *private)
{
        struct spu_context *ctx = s->private;
        int ret;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;

        seq_printf(s, "%s %llu %llu %llu %llu "
                      "%llu %llu %llu %llu %llu %llu %llu %llu\n",
                ctx_state_names[ctx->stats.util_state],
                spufs_acct_time(ctx, SPU_UTIL_USER),
                spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
                spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
                spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
                ctx->stats.vol_ctx_switch,
                ctx->stats.invol_ctx_switch,
                spufs_slb_flts(ctx),
                ctx->stats.hash_flt,
                ctx->stats.min_flt,
                ctx->stats.maj_flt,
                spufs_class2_intrs(ctx),
                ctx->stats.libassist);
        spu_release(ctx);
        return 0;
}
2403
2404static int spufs_stat_open(struct inode *inode, struct file *file)
2405{
2406        return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
2407}
2408
/* File operations for the "stat" file (seq_file based). */
static const struct file_operations spufs_stat_fops = {
        .open           = spufs_stat_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
2415
/* Number of entries currently queued in the switch-log ring buffer.
 * Caller must hold the context lock (head/tail are unsynchronized
 * otherwise). */
static inline int spufs_switch_log_used(struct spu_context *ctx)
{
        return (ctx->switch_log->head - ctx->switch_log->tail) %
                SWITCH_LOG_BUFSIZE;
}
2421
/* Number of free entries remaining in the switch-log ring buffer. */
static inline int spufs_switch_log_avail(struct spu_context *ctx)
{
        return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);
}
2426
/*
 * open() handler for "switch_log": allocate the ring buffer used to
 * record context-switch events.  Only one opener is allowed at a time;
 * a second open fails with -EBUSY.  All done under the context lock.
 */
static int spufs_switch_log_open(struct inode *inode, struct file *file)
{
        struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
        int rc;

        rc = spu_acquire(ctx);
        if (rc)
                return rc;

        if (ctx->switch_log) {
                rc = -EBUSY;
                goto out;
        }

        ctx->switch_log = kmalloc(sizeof(struct switch_log) +
                SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
                GFP_KERNEL);

        if (!ctx->switch_log) {
                rc = -ENOMEM;
                goto out;
        }

        /* head == tail means the ring buffer is empty */
        ctx->switch_log->head = ctx->switch_log->tail = 0;
        init_waitqueue_head(&ctx->switch_log->wait);
        rc = 0;

out:
        spu_release(ctx);
        return rc;
}
2458
/*
 * release() handler for "switch_log": free the ring buffer and clear
 * the pointer under the context lock, so spu_switch_log_notify() sees
 * either a valid buffer or NULL.
 */
static int spufs_switch_log_release(struct inode *inode, struct file *file)
{
        struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
        int rc;

        rc = spu_acquire(ctx);
        if (rc)
                return rc;

        kfree(ctx->switch_log);
        ctx->switch_log = NULL;
        spu_release(ctx);

        return 0;
}
2474
/*
 * Format the switch-log entry at the tail of the ring into @tbuf.
 * Does not consume the entry; returns the snprintf() result, i.e. the
 * length the record would have if not truncated to @n bytes.
 */
static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
{
        struct switch_log_entry *p;

        p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;

        return snprintf(tbuf, n, "%u.%09u %d %u %u %llu\n",
                        (unsigned int) p->tstamp.tv_sec,
                        (unsigned int) p->tstamp.tv_nsec,
                        p->spu_id,
                        (unsigned int) p->type,
                        (unsigned int) p->val,
                        (unsigned long long) p->timebase);
}
2489
2490static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
2491                             size_t len, loff_t *ppos)
2492{
2493        struct inode *inode = file->f_path.dentry->d_inode;
2494        struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2495        int error = 0, cnt = 0;
2496
2497        if (!buf || len < 0)
2498                return -EINVAL;
2499
2500        error = spu_acquire(ctx);
2501        if (error)
2502                return error;
2503
2504        while (cnt < len) {
2505                char tbuf[128];
2506                int width;
2507
2508                if (spufs_switch_log_used(ctx) == 0) {
2509                        if (cnt > 0) {
2510                                /* If there's data ready to go, we can
2511                                 * just return straight away */
2512                                break;
2513
2514                        } else if (file->f_flags & O_NONBLOCK) {
2515                                error = -EAGAIN;
2516                                break;
2517
2518                        } else {
2519                                /* spufs_wait will drop the mutex and
2520                                 * re-acquire, but since we're in read(), the
2521                                 * file cannot be _released (and so
2522                                 * ctx->switch_log is stable).
2523                                 */
2524                                error = spufs_wait(ctx->switch_log->wait,
2525                                                spufs_switch_log_used(ctx) > 0);
2526
2527                                /* On error, spufs_wait returns without the
2528                                 * state mutex held */
2529                                if (error)
2530                                        return error;
2531
2532                                /* We may have had entries read from underneath
2533                                 * us while we dropped the mutex in spufs_wait,
2534                                 * so re-check */
2535                                if (spufs_switch_log_used(ctx) == 0)
2536                                        continue;
2537                        }
2538                }
2539
2540                width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
2541                if (width < len)
2542                        ctx->switch_log->tail =
2543                                (ctx->switch_log->tail + 1) %
2544                                 SWITCH_LOG_BUFSIZE;
2545                else
2546                        /* If the record is greater than space available return
2547                         * partial buffer (so far) */
2548                        break;
2549
2550                error = copy_to_user(buf + cnt, tbuf, width);
2551                if (error)
2552                        break;
2553                cnt += width;
2554        }
2555
2556        spu_release(ctx);
2557
2558        return cnt == 0 ? error : cnt;
2559}
2560
/*
 * poll() handler for "switch_log": report POLLIN when at least one
 * entry is queued.  ctx->switch_log exists for the lifetime of the open
 * file (allocated in open, freed in release).
 *
 * NOTE(review): a spu_acquire() failure (negative errno) is returned
 * directly as the poll mask here — looks questionable; confirm intended
 * behavior.
 */
static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
        unsigned int mask = 0;
        int rc;

        poll_wait(file, &ctx->switch_log->wait, wait);

        rc = spu_acquire(ctx);
        if (rc)
                return rc;

        if (spufs_switch_log_used(ctx) > 0)
                mask |= POLLIN;

        spu_release(ctx);

        return mask;
}
2581
/* File operations for the "switch_log" file. */
static const struct file_operations spufs_switch_log_fops = {
        .owner          = THIS_MODULE,
        .open           = spufs_switch_log_open,
        .read           = spufs_switch_log_read,
        .poll           = spufs_switch_log_poll,
        .release        = spufs_switch_log_release,
};
2589
2590/**
2591 * Log a context switch event to a switch log reader.
2592 *
2593 * Must be called with ctx->state_mutex held.
2594 */
2595void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
2596                u32 type, u32 val)
2597{
2598        if (!ctx->switch_log)
2599                return;
2600
2601        if (spufs_switch_log_avail(ctx) > 1) {
2602                struct switch_log_entry *p;
2603
2604                p = ctx->switch_log->log + ctx->switch_log->head;
2605                ktime_get_ts(&p->tstamp);
2606                p->timebase = get_tb();
2607                p->spu_id = spu ? spu->number : -1;
2608                p->type = type;
2609                p->val = val;
2610
2611                ctx->switch_log->head =
2612                        (ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
2613        }
2614
2615        wake_up(&ctx->switch_log->wait);
2616}
2617
/*
 * seq_file show routine for the debug ".ctx" file: one line summarizing
 * the context's scheduling state, flags and key registers.  The MFC
 * control register is read live from the hardware (under the SPU's
 * register_lock) when the context is loaded, or from the saved CSA
 * otherwise.
 */
static int spufs_show_ctx(struct seq_file *s, void *private)
{
        struct spu_context *ctx = s->private;
        u64 mfc_control_RW;

        mutex_lock(&ctx->state_mutex);
        if (ctx->spu) {
                struct spu *spu = ctx->spu;
                struct spu_priv2 __iomem *priv2 = spu->priv2;

                spin_lock_irq(&spu->register_lock);
                mfc_control_RW = in_be64(&priv2->mfc_control_RW);
                spin_unlock_irq(&spu->register_lock);
        } else {
                struct spu_state *csa = &ctx->csa;

                mfc_control_RW = csa->priv2.mfc_control_RW;
        }

        seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
                " %c %llx %llx %llx %llx %x %x\n",
                ctx->state == SPU_STATE_SAVED ? 'S' : 'R',
                ctx->flags,
                ctx->sched_flags,
                ctx->prio,
                ctx->time_slice,
                ctx->spu ? ctx->spu->number : -1,
                !list_empty(&ctx->rq) ? 'q' : ' ',
                ctx->csa.class_0_pending,
                ctx->csa.class_0_dar,
                ctx->csa.class_1_dsisr,
                mfc_control_RW,
                ctx->ops->runcntl_read(ctx),
                ctx->ops->status_read(ctx));

        mutex_unlock(&ctx->state_mutex);

        return 0;
}
2657
2658static int spufs_ctx_open(struct inode *inode, struct file *file)
2659{
2660        return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx);
2661}
2662
/* File operations for the debug ".ctx" file (seq_file based). */
static const struct file_operations spufs_ctx_fops = {
        .open           = spufs_ctx_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
2669
/*
 * Files created in the directory of a normal (schedulable) context.
 * Each entry: name, file operations, mode, optional size hint.
 */
const struct spufs_tree_descr spufs_dir_contents[] = {
        { "capabilities", &spufs_caps_fops, 0444, },
        { "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
        { "regs", &spufs_regs_fops,  0666, sizeof(struct spu_reg128[128]), },
        { "mbox", &spufs_mbox_fops, 0444, },
        { "ibox", &spufs_ibox_fops, 0444, },
        { "wbox", &spufs_wbox_fops, 0222, },
        { "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
        { "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
        { "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
        { "signal1", &spufs_signal1_fops, 0666, },
        { "signal2", &spufs_signal2_fops, 0666, },
        { "signal1_type", &spufs_signal1_type, 0666, },
        { "signal2_type", &spufs_signal2_type, 0666, },
        { "cntl", &spufs_cntl_fops,  0666, },
        { "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), },
        { "lslr", &spufs_lslr_ops, 0444, },
        { "mfc", &spufs_mfc_fops, 0666, },
        { "mss", &spufs_mss_fops, 0666, },
        { "npc", &spufs_npc_ops, 0666, },
        { "srr0", &spufs_srr0_ops, 0666, },
        { "decr", &spufs_decr_ops, 0666, },
        { "decr_status", &spufs_decr_status_ops, 0666, },
        { "event_mask", &spufs_event_mask_ops, 0666, },
        { "event_status", &spufs_event_status_ops, 0444, },
        { "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
        { "phys-id", &spufs_id_ops, 0666, },
        { "object-id", &spufs_object_id_ops, 0666, },
        { "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), },
        { "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), },
        { "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), },
        { "dma_info", &spufs_dma_info_fops, 0444,
                sizeof(struct spu_dma_info), },
        { "proxydma_info", &spufs_proxydma_info_fops, 0444,
                sizeof(struct spu_proxydma_info)},
        { "tid", &spufs_tid_fops, 0444, },
        { "stat", &spufs_stat_fops, 0444, },
        { "switch_log", &spufs_switch_log_fops, 0444 },
        {},
};
2710
/*
 * Files created for SPU_CREATE_NOSCHED contexts: a subset of the normal
 * directory (no state-save-dependent files), with the signal files
 * switched to their nosched variants.
 */
const struct spufs_tree_descr spufs_dir_nosched_contents[] = {
        { "capabilities", &spufs_caps_fops, 0444, },
        { "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
        { "mbox", &spufs_mbox_fops, 0444, },
        { "ibox", &spufs_ibox_fops, 0444, },
        { "wbox", &spufs_wbox_fops, 0222, },
        { "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
        { "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
        { "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
        { "signal1", &spufs_signal1_nosched_fops, 0222, },
        { "signal2", &spufs_signal2_nosched_fops, 0222, },
        { "signal1_type", &spufs_signal1_type, 0666, },
        { "signal2_type", &spufs_signal2_type, 0666, },
        { "mss", &spufs_mss_fops, 0666, },
        { "mfc", &spufs_mfc_fops, 0666, },
        { "cntl", &spufs_cntl_fops,  0666, },
        { "npc", &spufs_npc_ops, 0666, },
        { "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
        { "phys-id", &spufs_id_ops, 0666, },
        { "object-id", &spufs_object_id_ops, 0666, },
        { "tid", &spufs_tid_fops, 0444, },
        { "stat", &spufs_stat_fops, 0444, },
        {},
};
2735
/* Extra debug files added to each context directory. */
const struct spufs_tree_descr spufs_dir_debug_contents[] = {
        { ".ctx", &spufs_ctx_fops, 0444, },
        {},
};
2740
/*
 * Per-context segments written into an SPU coredump note.  Each entry
 * supplies either a raw read routine or a u64 attribute getter plus a
 * size.  The size 19 appears to be the formatted width of a
 * "0x%<16 hex digits>\n" dump of a u64 — confirm against the coredump
 * writer before changing.
 */
const struct spufs_coredump_reader spufs_coredump_read[] = {
        { "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
        { "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
        { "lslr", NULL, spufs_lslr_get, 19 },
        { "decr", NULL, spufs_decr_get, 19 },
        { "decr_status", NULL, spufs_decr_status_get, 19 },
        { "mem", __spufs_mem_read, NULL, LS_SIZE, },
        { "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
        { "signal1_type", NULL, spufs_signal1_type_get, 19 },
        { "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
        { "signal2_type", NULL, spufs_signal2_type_get, 19 },
        { "event_mask", NULL, spufs_event_mask_get, 19 },
        { "event_status", NULL, spufs_event_status_get, 19 },
        { "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
        { "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
        { "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)},
        { "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)},
        { "proxydma_info", __spufs_proxydma_info_read,
                           NULL, sizeof(struct spu_proxydma_info)},
        { "object-id", NULL, spufs_object_id_get, 19 },
        { "npc", NULL, spufs_npc_get, 19 },
        { NULL },
};
2764