linux/arch/powerpc/platforms/cell/spu_base.c
/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/linux_logo.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/xmon.h>
#include <asm/prom.h>
#include <asm/kexec.h>

const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);

struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
EXPORT_SYMBOL_GPL(cbe_spu_info);

/*
 * The spufs fault-handling code needs to call force_sig_info to raise signals
 * on DMA errors. Export it here to avoid general kernel-wide access to this
 * function
 */
EXPORT_SYMBOL_GPL(force_sig_info);

/*
 * Protects cbe_spu_info and spu->number.
 */
static DEFINE_SPINLOCK(spu_lock);

/*
 * List of all spus in the system.
 *
 * This list is iterated by callers from irq context and callers that
 * want to sleep.  Thus modifications need to be done with both
 * spu_full_list_lock and spu_full_list_mutex held, while iterating
 * through it requires either of these locks.
 *
 * In addition, spu_full_list_lock protects all assignments to
 * spu->mm.
 */
static LIST_HEAD(spu_full_list);
static DEFINE_SPINLOCK(spu_full_list_lock);
static DEFINE_MUTEX(spu_full_list_mutex);

struct spu_slb {
        u64 esid, vsid;
};

void spu_invalidate_slbs(struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        unsigned long flags;

        spin_lock_irqsave(&spu->register_lock, flags);
        if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
                out_be64(&priv2->slb_invalidate_all_W, 0UL);
        spin_unlock_irqrestore(&spu->register_lock, flags);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);

/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
        struct spu *spu;
        unsigned long flags;

        spin_lock_irqsave(&spu_full_list_lock, flags);
        list_for_each_entry(spu, &spu_full_list, full_list) {
                if (spu->mm == mm)
                        spu_invalidate_slbs(spu);
        }
        spin_unlock_irqrestore(&spu_full_list_lock, flags);
}

/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1 ?
 */
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
        int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

        /* Global TLBIE broadcast required with SPEs. */
        bitmap_fill(cpumask_bits(mm_cpumask(mm)), nr);
}

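/*
 * Associate an mm with an SPU, or clear the association when @mm is NULL.
 * The assignment is done under spu_full_list_lock, as required by the
 * locking rules above, and the mm is marked as needing global TLB
 * invalidations while SPEs may hold translations for it.
 */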
void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
        unsigned long flags;

        spin_lock_irqsave(&spu_full_list_lock, flags);
        spu->mm = mm;
        spin_unlock_irqrestore(&spu_full_list_lock, flags);
        if (mm)
                mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);

int spu_64k_pages_available(void)
{
        return mmu_psize_defs[MMU_PAGE_64K].shift != 0;
}
EXPORT_SYMBOL_GPL(spu_64k_pages_available);

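/*
 * Restart a suspended MFC DMA queue. If a context switch is pending the
 * restart must not be issued now; record it as a pending fault instead so
 * that the context-switch code can restart the queue later.
 */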
static void spu_restart_dma(struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
                out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
        else {
                set_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
                mb();
        }
}

static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        pr_debug("%s: adding SLB[%d] 0x%016llx 0x%016llx\n",
                        __func__, slbe, slb->vsid, slb->esid);

        out_be64(&priv2->slb_index_W, slbe);
        /* set invalid before writing vsid */
        out_be64(&priv2->slb_esid_RW, 0);
        /* now it's safe to write the vsid */
        out_be64(&priv2->slb_vsid_RW, slb->vsid);
        /* setting the new esid makes the entry valid again */
        out_be64(&priv2->slb_esid_RW, slb->esid);
}

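/*
 * Handle an SPE segment (SLB) miss: build an SLB entry for the faulting
 * effective address, load it into the next replacement slot of the SPU's
 * SLB and restart the suspended DMA. Returns non-zero for addresses that
 * cannot be mapped.
 */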
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
        struct mm_struct *mm = spu->mm;
        struct spu_slb slb;
        int psize;

        pr_debug("%s\n", __func__);

        slb.esid = (ea & ESID_MASK) | SLB_ESID_V;

        switch(REGION_ID(ea)) {
        case USER_REGION_ID:
#ifdef CONFIG_PPC_MM_SLICES
                psize = get_slice_psize(mm, ea);
#else
                psize = mm->context.user_psize;
#endif
                slb.vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M)
                                << SLB_VSID_SHIFT) | SLB_VSID_USER;
                break;
        case VMALLOC_REGION_ID:
                if (ea < VMALLOC_END)
                        psize = mmu_vmalloc_psize;
                else
                        psize = mmu_io_psize;
                slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
                                << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
                break;
        case KERNEL_REGION_ID:
                psize = mmu_linear_psize;
                slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
                                << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
                break;
        default:
                /* Future: support kernel segments so that drivers
                 * can use SPUs.
                 */
                pr_debug("invalid region access at %016lx\n", ea);
                return 1;
        }
        slb.vsid |= mmu_psize_defs[psize].sllp;

        spu_load_slb(spu, spu->slb_replace, &slb);

        spu->slb_replace++;
        if (spu->slb_replace >= 8)
                spu->slb_replace = 0;

        spu_restart_dma(spu);
        spu->stats.slb_flt++;
        return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
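/*
 * Handle an SPE data storage (page fault) interrupt. Kernel-space hash
 * faults are resolved immediately via hash_page(); user-space faults are
 * recorded in class_1_dar/class_1_dsisr and deferred to process context
 * through the stop callback.
 */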
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
        int ret;

        pr_debug("%s, %llx, %lx\n", __func__, dsisr, ea);

        /*
         * Handle kernel space hash faults immediately. User hash
         * faults need to be deferred to process context.
         */
        if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) &&
            (REGION_ID(ea) != USER_REGION_ID)) {

                spin_unlock(&spu->register_lock);
                ret = hash_page(ea, _PAGE_PRESENT, 0x300);
                spin_lock(&spu->register_lock);

                if (!ret) {
                        spu_restart_dma(spu);
                        return 0;
                }
        }

        spu->class_1_dar = ea;
        spu->class_1_dsisr = dsisr;

        spu->stop_callback(spu, 1);

        spu->class_1_dar = 0;
        spu->class_1_dsisr = 0;

        return 0;
}

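/*
 * Build an SLB entry for a kernel virtual address, using the linear-mapping
 * page size for the kernel region and the virtual page size otherwise.
 */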
static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
{
        unsigned long ea = (unsigned long)addr;
        u64 llp;

        if (REGION_ID(ea) == KERNEL_REGION_ID)
                llp = mmu_psize_defs[mmu_linear_psize].sllp;
        else
                llp = mmu_psize_defs[mmu_virtual_psize].sllp;

        slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
                SLB_VSID_KERNEL | llp;
        slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
}

/**
 * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
 * address @new_addr is present.
 */
static inline int __slb_present(struct spu_slb *slbs, int nr_slbs,
                void *new_addr)
{
        unsigned long ea = (unsigned long)new_addr;
        int i;

        for (i = 0; i < nr_slbs; i++)
                if (!((slbs[i].esid ^ ea) & ESID_MASK))
                        return 1;

        return 0;
}

/**
 * Set up the SPU kernel SLBs, in preparation for a context save/restore. We
 * need to map both the context save area, and the save/restore code.
 *
 * Because the lscsa and code may cross segment boundaries, we check to see
 * if mappings are required for the start and end of each range. We currently
 * assume that the mappings are smaller than one segment - if not, something
 * is seriously wrong.
 */
void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
                void *code, int code_size)
{
        struct spu_slb slbs[4];
        int i, nr_slbs = 0;
        /* start and end addresses of both mappings */
        void *addrs[] = {
                lscsa, (void *)lscsa + sizeof(*lscsa) - 1,
                code, code + code_size - 1
        };

        /* check the set of addresses, and create a new entry in the slbs array
         * if there isn't already a SLB for that address */
        for (i = 0; i < ARRAY_SIZE(addrs); i++) {
                if (__slb_present(slbs, nr_slbs, addrs[i]))
                        continue;

                __spu_kernel_slb(addrs[i], &slbs[nr_slbs]);
                nr_slbs++;
        }

        spin_lock_irq(&spu->register_lock);
        /* Add the set of SLBs */
        for (i = 0; i < nr_slbs; i++)
                spu_load_slb(spu, i, &slbs[i]);
        spin_unlock_irq(&spu->register_lock);
}
EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);

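/*
 * Class 0 interrupts report SPU and MFC error conditions. Latch the pending
 * status and fault address for the owning context and notify it through the
 * stop callback before acknowledging the interrupt.
 */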
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
        struct spu *spu;
        unsigned long stat, mask;

        spu = data;

        spin_lock(&spu->register_lock);
        mask = spu_int_mask_get(spu, 0);
        stat = spu_int_stat_get(spu, 0) & mask;

        spu->class_0_pending |= stat;
        spu->class_0_dar = spu_mfc_dar_get(spu);
        spu->stop_callback(spu, 0);
        spu->class_0_pending = 0;
        spu->class_0_dar = 0;

        spu_int_stat_clear(spu, 0, stat);
        spin_unlock(&spu->register_lock);

        return IRQ_HANDLED;
}

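/*
 * Class 1 interrupts report MFC translation faults: segment (SLB) misses,
 * storage (page) faults and local-store compare suspensions.
 */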
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
        struct spu *spu;
        unsigned long stat, mask, dar, dsisr;

        spu = data;

        /* atomically read & clear class1 status. */
        spin_lock(&spu->register_lock);
        mask  = spu_int_mask_get(spu, 1);
        stat  = spu_int_stat_get(spu, 1) & mask;
        dar   = spu_mfc_dar_get(spu);
        dsisr = spu_mfc_dsisr_get(spu);
        if (stat & CLASS1_STORAGE_FAULT_INTR)
                spu_mfc_dsisr_set(spu, 0ul);
        spu_int_stat_clear(spu, 1, stat);

        pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
                        dar, dsisr);

        if (stat & CLASS1_SEGMENT_FAULT_INTR)
                __spu_trap_data_seg(spu, dar);

        if (stat & CLASS1_STORAGE_FAULT_INTR)
                __spu_trap_data_map(spu, dar, dsisr);

        if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR)
                ;

        if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
                ;

        spu->class_1_dsisr = 0;
        spu->class_1_dar = 0;

        spin_unlock(&spu->register_lock);

        return stat ? IRQ_HANDLED : IRQ_NONE;
}

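/*
 * Class 2 interrupts cover mailbox traffic, SPU stop-and-signal/halt and
 * MFC DMA tag-group completion. Mailbox interrupts are level triggered,
 * so they are masked before being acknowledged and dispatched.
 */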
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
        struct spu *spu;
        unsigned long stat;
        unsigned long mask;
        const int mailbox_intrs =
                CLASS2_MAILBOX_THRESHOLD_INTR | CLASS2_MAILBOX_INTR;

        spu = data;
        spin_lock(&spu->register_lock);
        stat = spu_int_stat_get(spu, 2);
        mask = spu_int_mask_get(spu, 2);
        /* ignore interrupts we're not waiting for */
        stat &= mask;
        /* mailbox interrupts are level triggered. mask them now before
         * acknowledging */
        if (stat & mailbox_intrs)
                spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
        /* acknowledge all interrupts before the callbacks */
        spu_int_stat_clear(spu, 2, stat);

        pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

        if (stat & CLASS2_MAILBOX_INTR)
                spu->ibox_callback(spu);

        if (stat & CLASS2_SPU_STOP_INTR)
                spu->stop_callback(spu, 2);

        if (stat & CLASS2_SPU_HALT_INTR)
                spu->stop_callback(spu, 2);

        if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
                spu->mfc_callback(spu);

        if (stat & CLASS2_MAILBOX_THRESHOLD_INTR)
                spu->wbox_callback(spu);

        spu->stats.class2_intr++;

        spin_unlock(&spu->register_lock);

        return stat ? IRQ_HANDLED : IRQ_NONE;
}

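/*
 * Request the three per-class interrupt lines of an SPU, naming them after
 * the SPU number. If any request fails, the lines that were already
 * requested are freed again.
 */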
static int spu_request_irqs(struct spu *spu)
{
        int ret = 0;

        if (spu->irqs[0] != NO_IRQ) {
                snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
                         spu->number);
                ret = request_irq(spu->irqs[0], spu_irq_class_0,
                                  IRQF_DISABLED,
                                  spu->irq_c0, spu);
                if (ret)
                        goto bail0;
        }
        if (spu->irqs[1] != NO_IRQ) {
                snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
                         spu->number);
                ret = request_irq(spu->irqs[1], spu_irq_class_1,
                                  IRQF_DISABLED,
                                  spu->irq_c1, spu);
                if (ret)
                        goto bail1;
        }
        if (spu->irqs[2] != NO_IRQ) {
                snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
                         spu->number);
                ret = request_irq(spu->irqs[2], spu_irq_class_2,
                                  IRQF_DISABLED,
                                  spu->irq_c2, spu);
                if (ret)
                        goto bail2;
        }
        return 0;

bail2:
        if (spu->irqs[1] != NO_IRQ)
                free_irq(spu->irqs[1], spu);
bail1:
        if (spu->irqs[0] != NO_IRQ)
                free_irq(spu->irqs[0], spu);
bail0:
        return ret;
}

static void spu_free_irqs(struct spu *spu)
{
        if (spu->irqs[0] != NO_IRQ)
                free_irq(spu->irqs[0], spu);
        if (spu->irqs[1] != NO_IRQ)
                free_irq(spu->irqs[1], spu);
        if (spu->irqs[2] != NO_IRQ)
                free_irq(spu->irqs[2], spu);
}

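/*
 * Reset the SPU channel state to sane initial values: clear the data of the
 * channels in zero_list and program the channel counts from count_list.
 */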
void spu_init_channels(struct spu *spu)
{
        static const struct {
                 unsigned channel;
                 unsigned count;
        } zero_list[] = {
                { 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
                { 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
        }, count_list[] = {
                { 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
                { 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
                { 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
        };
        struct spu_priv2 __iomem *priv2;
        int i;

        priv2 = spu->priv2;

        /* initialize all channel data to zero */
        for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
                int count;

                out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
                for (count = 0; count < zero_list[i].count; count++)
                        out_be64(&priv2->spu_chnldata_RW, 0);
        }

        /* initialize channel counts to meaningful values */
        for (i = 0; i < ARRAY_SIZE(count_list); i++) {
                out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
                out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
        }
}
EXPORT_SYMBOL_GPL(spu_init_channels);

static int spu_shutdown(struct sys_device *sysdev)
{
        struct spu *spu = container_of(sysdev, struct spu, sysdev);

        spu_free_irqs(spu);
        spu_destroy_spu(spu);
        return 0;
}

static struct sysdev_class spu_sysdev_class = {
        .name = "spu",
        .shutdown = spu_shutdown,
};

int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
        struct spu *spu;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list)
                sysdev_create_file(&spu->sysdev, attr);
        mutex_unlock(&spu_full_list_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);

int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
        struct spu *spu;
        int rc = 0;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list) {
                rc = sysfs_create_group(&spu->sysdev.kobj, attrs);

                /* we're in trouble here, but try unwinding anyway */
                if (rc) {
                        printk(KERN_ERR "%s: can't create sysfs group '%s'\n",
                                        __func__, attrs->name);

                        list_for_each_entry_continue_reverse(spu,
                                        &spu_full_list, full_list)
                                sysfs_remove_group(&spu->sysdev.kobj, attrs);
                        break;
                }
        }

        mutex_unlock(&spu_full_list_mutex);

        return rc;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);


void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
        struct spu *spu;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list)
                sysdev_remove_file(&spu->sysdev, attr);
        mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);

void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
        struct spu *spu;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list)
                sysfs_remove_group(&spu->sysdev.kobj, attrs);
        mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);

static int spu_create_sysdev(struct spu *spu)
{
        int ret;

        spu->sysdev.id = spu->number;
        spu->sysdev.cls = &spu_sysdev_class;
        ret = sysdev_register(&spu->sysdev);
        if (ret) {
                printk(KERN_ERR "Can't register SPU %d with sysfs\n",
                                spu->number);
                return ret;
        }

        sysfs_add_device_to_node(&spu->sysdev, spu->node);

        return 0;
}

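/*
 * Instantiate and register one SPU. Called by spu_enumerate_spus() for each
 * SPU found by the platform's management ops: sets up the low-level state,
 * requests the interrupts, creates the sysdev and adds the SPU to the
 * per-node and global lists.
 */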
static int __init create_spu(void *data)
{
        struct spu *spu;
        int ret;
        static int number;
        unsigned long flags;
        struct timespec ts;

        ret = -ENOMEM;
        spu = kzalloc(sizeof (*spu), GFP_KERNEL);
        if (!spu)
                goto out;

        spu->alloc_state = SPU_FREE;

        spin_lock_init(&spu->register_lock);
        spin_lock(&spu_lock);
        spu->number = number++;
        spin_unlock(&spu_lock);

        ret = spu_create_spu(spu, data);

        if (ret)
                goto out_free;

        spu_mfc_sdr_setup(spu);
        spu_mfc_sr1_set(spu, 0x33);
        ret = spu_request_irqs(spu);
        if (ret)
                goto out_destroy;

        ret = spu_create_sysdev(spu);
        if (ret)
                goto out_free_irqs;

        mutex_lock(&cbe_spu_info[spu->node].list_mutex);
        list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
        cbe_spu_info[spu->node].n_spus++;
        mutex_unlock(&cbe_spu_info[spu->node].list_mutex);

        mutex_lock(&spu_full_list_mutex);
        spin_lock_irqsave(&spu_full_list_lock, flags);
        list_add(&spu->full_list, &spu_full_list);
        spin_unlock_irqrestore(&spu_full_list_lock, flags);
        mutex_unlock(&spu_full_list_mutex);

        spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
        ktime_get_ts(&ts);
        spu->stats.tstamp = timespec_to_ns(&ts);

        INIT_LIST_HEAD(&spu->aff_list);

        goto out;

out_free_irqs:
        spu_free_irqs(spu);
out_destroy:
        spu_destroy_spu(spu);
out_free:
        kfree(spu);
out:
        return ret;
}

static const char *spu_state_names[] = {
        "user", "system", "iowait", "idle"
};

static unsigned long long spu_acct_time(struct spu *spu,
                enum spu_utilization_state state)
{
        struct timespec ts;
        unsigned long long time = spu->stats.times[state];

        /*
         * If the spu is idle or the context is stopped, utilization
         * statistics are not updated.  Apply the time delta from the
         * last recorded state of the spu.
         */
        if (spu->stats.util_state == state) {
                ktime_get_ts(&ts);
                time += timespec_to_ns(&ts) - spu->stats.tstamp;
        }

        return time / NSEC_PER_MSEC;
}


static ssize_t spu_stat_show(struct sys_device *sysdev,
                                struct sysdev_attribute *attr, char *buf)
{
        struct spu *spu = container_of(sysdev, struct spu, sysdev);

        return sprintf(buf, "%s %llu %llu %llu %llu "
                      "%llu %llu %llu %llu %llu %llu %llu %llu\n",
                spu_state_names[spu->stats.util_state],
                spu_acct_time(spu, SPU_UTIL_USER),
                spu_acct_time(spu, SPU_UTIL_SYSTEM),
                spu_acct_time(spu, SPU_UTIL_IOWAIT),
                spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
                spu->stats.vol_ctx_switch,
                spu->stats.invol_ctx_switch,
                spu->stats.slb_flt,
                spu->stats.hash_flt,
                spu->stats.min_flt,
                spu->stats.maj_flt,
                spu->stats.class2_intr,
                spu->stats.libassist);
}

static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);

#ifdef CONFIG_KEXEC

struct crash_spu_info {
        struct spu *spu;
        u32 saved_spu_runcntl_RW;
        u32 saved_spu_status_R;
        u32 saved_spu_npc_RW;
        u64 saved_mfc_sr1_RW;
        u64 saved_mfc_dar;
        u64 saved_mfc_dsisr;
};

#define CRASH_NUM_SPUS  16      /* Enough for current hardware */
static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS];

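/*
 * Crash-shutdown hook: save the run control, status, NPC and MFC fault state
 * of every registered SPU, then clear the master run control bit in SR1 so
 * the SPUs are quiesced before the crash kernel takes over.
 */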
static void crash_kexec_stop_spus(void)
{
        struct spu *spu;
        int i;
        u64 tmp;

        for (i = 0; i < CRASH_NUM_SPUS; i++) {
                if (!crash_spu_info[i].spu)
                        continue;

                spu = crash_spu_info[i].spu;

                crash_spu_info[i].saved_spu_runcntl_RW =
                        in_be32(&spu->problem->spu_runcntl_RW);
                crash_spu_info[i].saved_spu_status_R =
                        in_be32(&spu->problem->spu_status_R);
                crash_spu_info[i].saved_spu_npc_RW =
                        in_be32(&spu->problem->spu_npc_RW);

                crash_spu_info[i].saved_mfc_dar    = spu_mfc_dar_get(spu);
                crash_spu_info[i].saved_mfc_dsisr  = spu_mfc_dsisr_get(spu);
                tmp = spu_mfc_sr1_get(spu);
                crash_spu_info[i].saved_mfc_sr1_RW = tmp;

                tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
                spu_mfc_sr1_set(spu, tmp);

                __delay(200);
        }
}

static void crash_register_spus(struct list_head *list)
{
        struct spu *spu;
        int ret;

        list_for_each_entry(spu, list, full_list) {
                if (WARN_ON(spu->number >= CRASH_NUM_SPUS))
                        continue;

                crash_spu_info[spu->number].spu = spu;
        }

        ret = crash_shutdown_register(&crash_kexec_stop_spus);
        if (ret)
                printk(KERN_ERR "Could not register SPU crash handler\n");
}

#else
static inline void crash_register_spus(struct list_head *list)
{
}
#endif

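/*
 * Enumerate the SPUs present in the system, register the sysdev class and
 * the per-SPU devices, and hook the SPUs up to xmon, kexec crash handling
 * and the affinity code.
 */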
static int __init init_spu_base(void)
{
        int i, ret = 0;

        for (i = 0; i < MAX_NUMNODES; i++) {
                mutex_init(&cbe_spu_info[i].list_mutex);
                INIT_LIST_HEAD(&cbe_spu_info[i].spus);
        }

        if (!spu_management_ops)
                goto out;

        /* create sysdev class for spus */
        ret = sysdev_class_register(&spu_sysdev_class);
        if (ret)
                goto out;

        ret = spu_enumerate_spus(create_spu);

        if (ret < 0) {
                printk(KERN_WARNING "%s: Error initializing spus\n",
                        __func__);
                goto out_unregister_sysdev_class;
        }

        if (ret > 0)
                fb_append_extra_logo(&logo_spe_clut224, ret);

        mutex_lock(&spu_full_list_mutex);
        xmon_register_spus(&spu_full_list);
        crash_register_spus(&spu_full_list);
        mutex_unlock(&spu_full_list_mutex);
        spu_add_sysdev_attr(&attr_stat);

        spu_init_affinity();

        return 0;

 out_unregister_sysdev_class:
        sysdev_class_unregister(&spu_sysdev_class);
 out:
        return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");