linux/arch/mips/kernel/vpe.c
   1/*
   2 * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
   3 *
   4 *  This program is free software; you can distribute it and/or modify it
   5 *  under the terms of the GNU General Public License (Version 2) as
   6 *  published by the Free Software Foundation.
   7 *
   8 *  This program is distributed in the hope it will be useful, but WITHOUT
   9 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11 *  for more details.
  12 *
  13 *  You should have received a copy of the GNU General Public License along
  14 *  with this program; if not, write to the Free Software Foundation, Inc.,
  15 *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
  16 */
  17
  18/*
  19 * VPE support module
  20 *
  21 * Provides support for loading a MIPS SP program on VPE1.
  22 * The SP environment is rather simple, no TLBs.  It needs to be relocatable
  23 * (or partially linked). You should initialise your stack in the startup
  24 * code. This loader looks for the symbol __start and sets up
  25 * execution to resume from there. The MIPS SDE kit contains suitable examples.
  26 *
  27 * To load and run, simply cat a SP 'program file' to /dev/vpe1.
  28 * i.e. cat spapp >/dev/vpe1.
  29 */
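    /*
     * Illustrative userspace usage (a sketch only; error handling is omitted
     * and the open mode is an assumption based on the description above):
     *
     *     int fd = open("/dev/vpe1", O_WRONLY);
     *     write(fd, elf_image, elf_size);
     *     close(fd);          the final close triggers the load and starts VPE1
     */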
  30#include <linux/kernel.h>
  31#include <linux/device.h>
  32#include <linux/fs.h>
  33#include <linux/init.h>
  34#include <asm/uaccess.h>
  35#include <linux/slab.h>
  36#include <linux/list.h>
  37#include <linux/vmalloc.h>
  38#include <linux/elf.h>
  39#include <linux/seq_file.h>
  40#include <linux/syscalls.h>
  41#include <linux/moduleloader.h>
  42#include <linux/interrupt.h>
  43#include <linux/poll.h>
  44#include <linux/bootmem.h>
  45#include <asm/mipsregs.h>
  46#include <asm/mipsmtregs.h>
  47#include <asm/cacheflush.h>
  48#include <linux/atomic.h>
  49#include <asm/cpu.h>
  50#include <asm/mips_mt.h>
  51#include <asm/processor.h>
  52#include <asm/vpe.h>
  53#include <asm/kspd.h>
  54
  55typedef void *vpe_handle;
  56
  57#ifndef ARCH_SHF_SMALL
  58#define ARCH_SHF_SMALL 0
  59#endif
  60
  61/* If this is set, the section belongs in the init part of the module */
  62#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
  63
  64/*
  65 * The number of TCs and VPEs physically available on the core
  66 */
  67static int hw_tcs, hw_vpes;
  68static char module_name[] = "vpe";
  69static int major;
  70static const int minor = 1;     /* fixed for now  */
  71
  72#ifdef CONFIG_MIPS_APSP_KSPD
  73static struct kspd_notifications kspd_events;
  74static int kspd_events_reqd;
  75#endif
  76
  77/* grab the likely amount of memory we will need. */
  78#ifdef CONFIG_MIPS_VPE_LOADER_TOM
  79#define P_SIZE (2 * 1024 * 1024)
  80#else
  81/* add an overhead to the max kmalloc size for non-stripped symbols/etc */
  82#define P_SIZE (256 * 1024)
  83#endif
  84
  85extern unsigned long physical_memsize;
  86
  87#define MAX_VPES 16
  88#define VPE_PATH_MAX 256
  89
  90enum vpe_state {
  91        VPE_STATE_UNUSED = 0,
  92        VPE_STATE_INUSE,
  93        VPE_STATE_RUNNING
  94};
  95
  96enum tc_state {
  97        TC_STATE_UNUSED = 0,
  98        TC_STATE_INUSE,
  99        TC_STATE_RUNNING,
 100        TC_STATE_DYNAMIC
 101};
 102
 103struct vpe {
 104        enum vpe_state state;
 105
 106        /* (device) minor associated with this vpe */
 107        int minor;
 108
 109        /* elfloader stuff */
 110        void *load_addr;
 111        unsigned long len;
 112        char *pbuffer;
 113        unsigned long plen;
 114        unsigned int uid, gid;
 115        char cwd[VPE_PATH_MAX];
 116
 117        unsigned long __start;
 118
 119        /* TCs associated with this VPE */
 120        struct list_head tc;
 121
 122        /* The list of VPEs */
 123        struct list_head list;
 124
 125        /* shared symbol address */
 126        void *shared_ptr;
 127
 128        /* the list of who wants to know when something major happens */
 129        struct list_head notify;
 130
 131        unsigned int ntcs;
 132};
 133
 134struct tc {
 135        enum tc_state state;
 136        int index;
 137
 138        struct vpe *pvpe;       /* parent VPE */
 139        struct list_head tc;    /* The list of TCs with this VPE */
 140        struct list_head list;  /* The global list of TCs */
 141};
 142
 143struct {
 144        spinlock_t vpe_list_lock;
 145        struct list_head vpe_list;      /* Virtual processing elements */
 146        spinlock_t tc_list_lock;
 147        struct list_head tc_list;       /* Thread contexts */
 148} vpecontrol = {
 149        .vpe_list_lock  = __SPIN_LOCK_UNLOCKED(vpe_list_lock),
 150        .vpe_list       = LIST_HEAD_INIT(vpecontrol.vpe_list),
 151        .tc_list_lock   = __SPIN_LOCK_UNLOCKED(tc_list_lock),
 152        .tc_list        = LIST_HEAD_INIT(vpecontrol.tc_list)
 153};
 154
 155static void release_progmem(void *ptr);
 156
 157/* get the vpe associated with this minor */
 158static struct vpe *get_vpe(int minor)
 159{
 160        struct vpe *res, *v;
 161
 162        if (!cpu_has_mipsmt)
 163                return NULL;
 164
 165        res = NULL;
 166        spin_lock(&vpecontrol.vpe_list_lock);
 167        list_for_each_entry(v, &vpecontrol.vpe_list, list) {
 168                if (v->minor == minor) {
 169                        res = v;
 170                        break;
 171                }
 172        }
 173        spin_unlock(&vpecontrol.vpe_list_lock);
 174
 175        return res;
 176}
 177
 178/* get the tc associated with this index */
 179static struct tc *get_tc(int index)
 180{
 181        struct tc *res, *t;
 182
 183        res = NULL;
 184        spin_lock(&vpecontrol.tc_list_lock);
 185        list_for_each_entry(t, &vpecontrol.tc_list, list) {
 186                if (t->index == index) {
 187                        res = t;
 188                        break;
 189                }
 190        }
 191        spin_unlock(&vpecontrol.tc_list_lock);
 192
 193        return res;
 194}
 195
 196/* allocate a vpe and associate it with this minor (or index) */
 197static struct vpe *alloc_vpe(int minor)
 198{
 199        struct vpe *v;
 200
 201        if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL)
 202                return NULL;
 203
 204        INIT_LIST_HEAD(&v->tc);
 205        spin_lock(&vpecontrol.vpe_list_lock);
 206        list_add_tail(&v->list, &vpecontrol.vpe_list);
 207        spin_unlock(&vpecontrol.vpe_list_lock);
 208
 209        INIT_LIST_HEAD(&v->notify);
 210        v->minor = minor;
 211
 212        return v;
 213}
 214
 215/* allocate a tc. At startup only tc0 is running, all others can be halted. */
 216static struct tc *alloc_tc(int index)
 217{
 218        struct tc *tc;
 219
 220        if ((tc = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL)
 221                goto out;
 222
 223        INIT_LIST_HEAD(&tc->tc);
 224        tc->index = index;
 225
 226        spin_lock(&vpecontrol.tc_list_lock);
 227        list_add_tail(&tc->list, &vpecontrol.tc_list);
 228        spin_unlock(&vpecontrol.tc_list_lock);
 229
 230out:
 231        return tc;
 232}
 233
 234/* clean up and free everything */
 235static void release_vpe(struct vpe *v)
 236{
 237        list_del(&v->list);
 238        if (v->load_addr)
 239                release_progmem(v);
 240        kfree(v);
 241}
 242
 243static void __maybe_unused dump_mtregs(void)
 244{
 245        unsigned long val;
 246
 247        val = read_c0_config3();
 248        printk("config3 0x%lx MT %ld\n", val,
 249               (val & CONFIG3_MT) >> CONFIG3_MT_SHIFT);
 250
 251        val = read_c0_mvpcontrol();
 252        printk("MVPControl 0x%lx, STLB %ld VPC %ld EVP %ld\n", val,
 253               (val & MVPCONTROL_STLB) >> MVPCONTROL_STLB_SHIFT,
 254               (val & MVPCONTROL_VPC) >> MVPCONTROL_VPC_SHIFT,
 255               (val & MVPCONTROL_EVP));
 256
 257        val = read_c0_mvpconf0();
 258        printk("mvpconf0 0x%lx, PVPE %ld PTC %ld M %ld\n", val,
 259               (val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT,
 260               val & MVPCONF0_PTC, (val & MVPCONF0_M) >> MVPCONF0_M_SHIFT);
 261}
 262
 263/* Find some VPE program space  */
 264static void *alloc_progmem(unsigned long len)
 265{
 266        void *addr;
 267
 268#ifdef CONFIG_MIPS_VPE_LOADER_TOM
 269        /*
 270         * This means you must tell Linux to use less memory than you
 271         * physically have, for example by passing a mem= boot argument.
 272         */
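            /*
             * For example (illustrative numbers): booting a 256MB board with
             * mem=252M leaves the top 4MB of RAM, starting at the address
             * computed below, for the SP program.
             */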
 273        addr = pfn_to_kaddr(max_low_pfn);
 274        memset(addr, 0, len);
 275#else
 276        /* simply grab some memory for now */
 277        addr = kzalloc(len, GFP_KERNEL);
 278#endif
 279
 280        return addr;
 281}
 282
 283static void release_progmem(void *ptr)
 284{
 285#ifndef CONFIG_MIPS_VPE_LOADER_TOM
 286        kfree(ptr);
 287#endif
 288}
 289
 290/* Update size with this section: return offset. */
 291static long get_offset(unsigned long *size, Elf_Shdr * sechdr)
 292{
 293        long ret;
 294
 295        ret = ALIGN(*size, sechdr->sh_addralign ? : 1);
 296        *size = ret + sechdr->sh_size;
 297        return ret;
 298}
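    /*
     * Worked example (illustrative numbers): with *size == 0x1234 and a
     * section whose sh_addralign == 16 and sh_size == 0x40, the section is
     * placed at offset 0x1240 and *size becomes 0x1280.  vpe_elfload() later
     * copies each SHF_ALLOC section to v->load_addr + sh_entsize using these
     * offsets.
     */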
 299
 300/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
 301   might -- code, read-only data, read-write data, small data.  Tally
 302   sizes, and place the offsets into sh_entsize fields: high bit means it
 303   belongs in init. */
 304static void layout_sections(struct module *mod, const Elf_Ehdr * hdr,
 305                            Elf_Shdr * sechdrs, const char *secstrings)
 306{
 307        static unsigned long const masks[][2] = {
 308                /* NOTE: all executable code must be the first section
 309                 * in this array; otherwise modify the text_size
 310                 * finder in the two loops below */
 311                {SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL},
 312                {SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL},
 313                {SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL},
 314                {ARCH_SHF_SMALL | SHF_ALLOC, 0}
 315        };
 316        unsigned int m, i;
 317
 318        for (i = 0; i < hdr->e_shnum; i++)
 319                sechdrs[i].sh_entsize = ~0UL;
 320
 321        for (m = 0; m < ARRAY_SIZE(masks); ++m) {
 322                for (i = 0; i < hdr->e_shnum; ++i) {
 323                        Elf_Shdr *s = &sechdrs[i];
 324
 325                        //  || strncmp(secstrings + s->sh_name, ".init", 5) == 0)
 326                        if ((s->sh_flags & masks[m][0]) != masks[m][0]
 327                            || (s->sh_flags & masks[m][1])
 328                            || s->sh_entsize != ~0UL)
 329                                continue;
 330                        s->sh_entsize =
 331                                get_offset((unsigned long *)&mod->core_size, s);
 332                }
 333
 334                if (m == 0)
 335                        mod->core_text_size = mod->core_size;
 336
 337        }
 338}
 339
 340
 341/* from module-elf32.c, but subverted a little */
 342
 343struct mips_hi16 {
 344        struct mips_hi16 *next;
 345        Elf32_Addr *addr;
 346        Elf32_Addr value;
 347};
 348
 349static struct mips_hi16 *mips_hi16_list;
 350static unsigned int gp_offs, gp_addr;
 351
 352static int apply_r_mips_none(struct module *me, uint32_t *location,
 353                             Elf32_Addr v)
 354{
 355        return 0;
 356}
 357
 358static int apply_r_mips_gprel16(struct module *me, uint32_t *location,
 359                                Elf32_Addr v)
 360{
 361        int rel;
 362
 363        if( !(*location & 0xffff) ) {
 364                rel = (int)v - gp_addr;
 365        }
 366        else {
 367                /* .sbss + gp(relative) + offset */
 368                /* kludge! */
 369                rel =  (int)(short)((int)v + gp_offs +
 370                                    (int)(short)(*location & 0xffff) - gp_addr);
 371        }
 372
 373        if( (rel > 32768) || (rel < -32768) ) {
 374                printk(KERN_DEBUG "VPE loader: apply_r_mips_gprel16: "
 375                       "relative address 0x%x out of range of gp register\n",
 376                       rel);
 377                return -ENOEXEC;
 378        }
 379
 380        *location = (*location & 0xffff0000) | (rel & 0xffff);
 381
 382        return 0;
 383}
 384
 385static int apply_r_mips_pc16(struct module *me, uint32_t *location,
 386                             Elf32_Addr v)
 387{
 388        int rel;
 389        rel = (((unsigned int)v - (unsigned int)location));
 390        rel >>= 2;              // because the offset is in _instructions_ not bytes.
 391        rel -= 1;               // and one instruction less due to the branch delay slot.
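            /*
             * Worked example (illustrative addresses): a branch at 0x80000100
             * targeting 0x80000110 gives rel = (0x10 >> 2) - 1 = 3, i.e. the
             * target is three instructions beyond the delay slot.
             */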
 392
 393        if( (rel > 32768) || (rel < -32768) ) {
 394                printk(KERN_DEBUG "VPE loader: "
 395                       "apply_r_mips_pc16: relative address out of range 0x%x\n", rel);
 396                return -ENOEXEC;
 397        }
 398
 399        *location = (*location & 0xffff0000) | (rel & 0xffff);
 400
 401        return 0;
 402}
 403
 404static int apply_r_mips_32(struct module *me, uint32_t *location,
 405                           Elf32_Addr v)
 406{
 407        *location += v;
 408
 409        return 0;
 410}
 411
 412static int apply_r_mips_26(struct module *me, uint32_t *location,
 413                           Elf32_Addr v)
 414{
 415        if (v % 4) {
 416                printk(KERN_DEBUG "VPE loader: apply_r_mips_26 "
 417                       " unaligned relocation\n");
 418                return -ENOEXEC;
 419        }
 420
 421/*
 422 * Not desperately convinced this is a good check of an overflow condition
 423 * anyway. But it gets in the way of handling undefined weak symbols which
 424 * we want to set to zero.
 425 * if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
 426 * printk(KERN_ERR
 427 * "module %s: relocation overflow\n",
 428 * me->name);
 429 * return -ENOEXEC;
 430 * }
 431 */
 432
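            /*
             * The low 26 bits of a J/JAL instruction hold a word index within
             * the current 256MB segment, so add the target in words (v >> 2)
             * to the addend already present in the instruction.
             */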
 433        *location = (*location & ~0x03ffffff) |
 434                ((*location + (v >> 2)) & 0x03ffffff);
 435        return 0;
 436}
 437
 438static int apply_r_mips_hi16(struct module *me, uint32_t *location,
 439                             Elf32_Addr v)
 440{
 441        struct mips_hi16 *n;
 442
 443        /*
 444         * We cannot relocate this one now because we don't know the value of
 445         * the carry we need to add.  Save the information, and let LO16 do the
 446         * actual relocation.
 447         */
 448        n = kmalloc(sizeof *n, GFP_KERNEL);
 449        if (!n)
 450                return -ENOMEM;
 451
 452        n->addr = location;
 453        n->value = v;
 454        n->next = mips_hi16_list;
 455        mips_hi16_list = n;
 456
 457        return 0;
 458}
 459
 460static int apply_r_mips_lo16(struct module *me, uint32_t *location,
 461                             Elf32_Addr v)
 462{
 463        unsigned long insnlo = *location;
 464        Elf32_Addr val, vallo;
 465        struct mips_hi16 *l, *next;
 466
 467        /* Sign extend the addend we extract from the lo insn.  */
 468        vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
 469
 470        if (mips_hi16_list != NULL) {
 471
 472                l = mips_hi16_list;
 473                while (l != NULL) {
 474                        unsigned long insn;
 475
 476                        /*
 477                         * The value for the HI16 had best be the same.
 478                         */
 479                        if (v != l->value) {
 480                                printk(KERN_DEBUG "VPE loader: "
 481                                       "apply_r_mips_lo16/hi16: \t"
 482                                       "inconsistent value information\n");
 483                                goto out_free;
 484                        }
 485
 486                        /*
 487                         * Do the HI16 relocation.  Note that we actually don't
 488                         * need to know anything about the LO16 itself, except
 489                         * where to find the low 16 bits of the addend needed
 490                         * by the LO16.
 491                         */
 492                        insn = *l->addr;
 493                        val = ((insn & 0xffff) << 16) + vallo;
 494                        val += v;
 495
 496                        /*
 497                         * Account for the sign extension that will happen in
 498                         * the low bits.
 499                         */
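                            /*
                             * Worked example (illustrative numbers): for
                             * v = 0x00017ff0 with a HI16 addend of 0 and a
                             * LO16 addend of 0x20, the target is 0x00018010.
                             * The LO16 field becomes 0x8010, which the CPU
                             * sign-extends to -0x7ff0, so the HI16 field must
                             * carry and become 0x0002 (0x20000 - 0x7ff0).
                             */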
 500                        val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff;
 501
 502                        insn = (insn & ~0xffff) | val;
 503                        *l->addr = insn;
 504
 505                        next = l->next;
 506                        kfree(l);
 507                        l = next;
 508                }
 509
 510                mips_hi16_list = NULL;
 511        }
 512
 513        /*
 514         * Ok, we're done with the HI16 relocs.  Now deal with the LO16.
 515         */
 516        val = v + vallo;
 517        insnlo = (insnlo & ~0xffff) | (val & 0xffff);
 518        *location = insnlo;
 519
 520        return 0;
 521
 522out_free:
 523        while (l != NULL) {
 524                next = l->next;
 525                kfree(l);
 526                l = next;
 527        }
 528        mips_hi16_list = NULL;
 529
 530        return -ENOEXEC;
 531}
 532
 533static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
 534                                Elf32_Addr v) = {
 535        [R_MIPS_NONE]   = apply_r_mips_none,
 536        [R_MIPS_32]     = apply_r_mips_32,
 537        [R_MIPS_26]     = apply_r_mips_26,
 538        [R_MIPS_HI16]   = apply_r_mips_hi16,
 539        [R_MIPS_LO16]   = apply_r_mips_lo16,
 540        [R_MIPS_GPREL16] = apply_r_mips_gprel16,
 541        [R_MIPS_PC16] = apply_r_mips_pc16
 542};
 543
 544static char *rstrs[] = {
 545        [R_MIPS_NONE]   = "MIPS_NONE",
 546        [R_MIPS_32]     = "MIPS_32",
 547        [R_MIPS_26]     = "MIPS_26",
 548        [R_MIPS_HI16]   = "MIPS_HI16",
 549        [R_MIPS_LO16]   = "MIPS_LO16",
 550        [R_MIPS_GPREL16] = "MIPS_GPREL16",
 551        [R_MIPS_PC16] = "MIPS_PC16"
 552};
 553
 554static int apply_relocations(Elf32_Shdr *sechdrs,
 555                      const char *strtab,
 556                      unsigned int symindex,
 557                      unsigned int relsec,
 558                      struct module *me)
 559{
 560        Elf32_Rel *rel = (void *) sechdrs[relsec].sh_addr;
 561        Elf32_Sym *sym;
 562        uint32_t *location;
 563        unsigned int i;
 564        Elf32_Addr v;
 565        int res;
 566
 567        for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
 568                Elf32_Word r_info = rel[i].r_info;
 569
 570                /* This is where to make the change */
 571                location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
 572                        + rel[i].r_offset;
 573                /* This is the symbol it is referring to */
 574                sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
 575                        + ELF32_R_SYM(r_info);
 576
 577                if (!sym->st_value) {
 578                        printk(KERN_DEBUG "%s: undefined weak symbol %s\n",
 579                               me->name, strtab + sym->st_name);
 580                        /* just print the warning, don't barf */
 581                }
 582
 583                v = sym->st_value;
 584
 585                res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v);
 586                if( res ) {
 587                        char *r = rstrs[ELF32_R_TYPE(r_info)];
 588                        printk(KERN_WARNING "VPE loader: .text+0x%x "
 589                               "relocation type %s for symbol \"%s\" failed\n",
 590                               rel[i].r_offset, r ? r : "UNKNOWN",
 591                               strtab + sym->st_name);
 592                        return res;
 593                }
 594        }
 595
 596        return 0;
 597}
 598
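    /*
     * Remember where the program's _gp symbol points (see simplify_symbols());
     * apply_r_mips_gprel16() uses gp_addr and gp_offs to resolve gp-relative
     * (small data) references.
     */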
 599static inline void save_gp_address(unsigned int secbase, unsigned int rel)
 600{
 601        gp_addr = secbase + rel;
 602        gp_offs = gp_addr - (secbase & 0xffff0000);
 603}
 604/* end module-elf32.c */
 605
 606
 607
 608/* Change all symbols so that st_value encodes the pointer directly. */
 609static void simplify_symbols(Elf_Shdr * sechdrs,
 610                            unsigned int symindex,
 611                            const char *strtab,
 612                            const char *secstrings,
 613                            unsigned int nsecs, struct module *mod)
 614{
 615        Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
 616        unsigned long secbase, bssbase = 0;
 617        unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
 618        int size;
 619
 620        /* find the .bss section for COMMON symbols */
 621        for (i = 0; i < nsecs; i++) {
 622                if (strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) == 0) {
 623                        bssbase = sechdrs[i].sh_addr;
 624                        break;
 625                }
 626        }
 627
 628        for (i = 1; i < n; i++) {
 629                switch (sym[i].st_shndx) {
 630                case SHN_COMMON:
 631                        /* Allocate space for the symbol in the .bss section.
 632                           st_value is currently size.
 633                           We want it to have the address of the symbol. */
 634
 635                        size = sym[i].st_value;
 636                        sym[i].st_value = bssbase;
 637
 638                        bssbase += size;
 639                        break;
 640
 641                case SHN_ABS:
 642                        /* Don't need to do anything */
 643                        break;
 644
 645                case SHN_UNDEF:
 646                        /* ret = -ENOENT; */
 647                        break;
 648
 649                case SHN_MIPS_SCOMMON:
 650                        printk(KERN_DEBUG "simplify_symbols: ignoring SHN_MIPS_SCOMMON "
 651                               "symbol <%s> st_shndx %d\n", strtab + sym[i].st_name,
 652                               sym[i].st_shndx);
 653                        // .sbss section
 654                        break;
 655
 656                default:
 657                        secbase = sechdrs[sym[i].st_shndx].sh_addr;
 658
 659                        if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0) {
 660                                save_gp_address(secbase, sym[i].st_value);
 661                        }
 662
 663                        sym[i].st_value += secbase;
 664                        break;
 665                }
 666        }
 667}
 668
 669#ifdef DEBUG_ELFLOADER
 670static void dump_elfsymbols(Elf_Shdr * sechdrs, unsigned int symindex,
 671                            const char *strtab, struct module *mod)
 672{
 673        Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
 674        unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
 675
 676        printk(KERN_DEBUG "dump_elfsymbols: n %d\n", n);
 677        for (i = 1; i < n; i++) {
 678                printk(KERN_DEBUG " i %d name <%s> 0x%x\n", i,
 679                       strtab + sym[i].st_name, sym[i].st_value);
 680        }
 681}
 682#endif
 683
 684/* We are prepared, so configure and start the VPE... */
 685static int vpe_run(struct vpe * v)
 686{
 687        unsigned long flags, val, dmt_flag;
 688        struct vpe_notifications *n;
 689        unsigned int vpeflags;
 690        struct tc *t;
 691
 692        /* check we are the Master VPE */
 693        local_irq_save(flags);
 694        val = read_c0_vpeconf0();
 695        if (!(val & VPECONF0_MVP)) {
 696                printk(KERN_WARNING
 697                       "VPE loader: only Master VPEs are allowed to configure MT\n");
 698                local_irq_restore(flags);
 699
 700                return -1;
 701        }
 702
 703        dmt_flag = dmt();
 704        vpeflags = dvpe();
 705
 706        if (!list_empty(&v->tc)) {
 707                if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
 708                        evpe(vpeflags);
 709                        emt(dmt_flag);
 710                        local_irq_restore(flags);
 711
 712                        printk(KERN_WARNING
 713                               "VPE loader: TC %d is already in use.\n",
 714                               t->index);
 715                        return -ENOEXEC;
 716                }
 717        } else {
 718                evpe(vpeflags);
 719                emt(dmt_flag);
 720                local_irq_restore(flags);
 721
 722                printk(KERN_WARNING
 723                       "VPE loader: No TCs associated with VPE %d\n",
 724                       v->minor);
 725
 726                return -ENOEXEC;
 727        }
 728
 729        /* Put MVPE's into 'configuration state' */
 730        set_c0_mvpcontrol(MVPCONTROL_VPC);
 731
 732        settc(t->index);
 733
 734        /* should check it is halted, and not activated */
 735        if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) {
 736                evpe(vpeflags);
 737                emt(dmt_flag);
 738                local_irq_restore(flags);
 739
 740                printk(KERN_WARNING "VPE loader: TC %d is already active!\n",
 741                       t->index);
 742
 743                return -ENOEXEC;
 744        }
 745
 746        /* Write the address we want it to start running from into the TCRestart register. */
 747        write_tc_c0_tcrestart((unsigned long)v->__start);
 748        write_tc_c0_tccontext((unsigned long)0);
 749
 750        /*
 751         * Mark the TC as activated, not interrupt exempt and not dynamically
 752         * allocatable
 753         */
 754        val = read_tc_c0_tcstatus();
 755        val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
 756        write_tc_c0_tcstatus(val);
 757
 758        write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
 759
 760        /*
 761         * The sde-kit passes 'memsize' to __start in $a3, so set something
 762         * here...  Or set $a3 to zero and define DFLT_STACK_SIZE and
 763         * DFLT_HEAP_SIZE when you compile your program
 764         */
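            /* GPRs 6 and 7 are $a2/$a3: pass the TC count and the memory
               size down to __start */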
 765        mttgpr(6, v->ntcs);
 766        mttgpr(7, physical_memsize);
 767
 768        /* set up VPE1 */
 769        /*
 770         * bind the TC to VPE 1 as late as possible so we only have the final
 771         * VPE registers to set up, and so an EJTAG probe can trigger on it
 772         */
 773        write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
 774
 775        write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));
 776
 777        back_to_back_c0_hazard();
 778
 779        /* Set up the XTC bit in vpeconf0 to point at our tc */
 780        write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
 781                              | (t->index << VPECONF0_XTC_SHIFT));
 782
 783        back_to_back_c0_hazard();
 784
 785        /* enable this VPE */
 786        write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
 787
 788        /* clear out any leftovers from a previous program */
 789        write_vpe_c0_status(0);
 790        write_vpe_c0_cause(0);
 791
 792        /* take system out of configuration state */
 793        clear_c0_mvpcontrol(MVPCONTROL_VPC);
 794
 795        /*
 796         * SMTC/SMVP kernels manage VPE enable independently,
 797         * but uniprocessor kernels need to turn it on, even
 798         * if that wasn't the pre-dvpe() state.
 799         */
 800#ifdef CONFIG_SMP
 801        evpe(vpeflags);
 802#else
 803        evpe(EVPE_ENABLE);
 804#endif
 805        emt(dmt_flag);
 806        local_irq_restore(flags);
 807
 808        list_for_each_entry(n, &v->notify, list)
 809                n->start(minor);
 810
 811        return 0;
 812}
 813
 814static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
 815                                      unsigned int symindex, const char *strtab,
 816                                      struct module *mod)
 817{
 818        Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
 819        unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
 820
 821        for (i = 1; i < n; i++) {
 822                if (strcmp(strtab + sym[i].st_name, "__start") == 0) {
 823                        v->__start = sym[i].st_value;
 824                }
 825
 826                if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0) {
 827                        v->shared_ptr = (void *)sym[i].st_value;
 828                }
 829        }
 830
 831        if ( (v->__start == 0) || (v->shared_ptr == NULL))
 832                return -1;
 833
 834        return 0;
 835}
 836
 837/*
 838 * Allocates a VPE with some program code space (the load address), copies the
 839 * contents of the program (p)buffer, performing relocations etc., and frees it
 840 * when finished.
 841 */
 842static int vpe_elfload(struct vpe * v)
 843{
 844        Elf_Ehdr *hdr;
 845        Elf_Shdr *sechdrs;
 846        long err = 0;
 847        char *secstrings, *strtab = NULL;
 848        unsigned int len, i, symindex = 0, strindex = 0, relocate = 0;
 849        struct module mod;      // so we can re-use the relocations code
 850
 851        memset(&mod, 0, sizeof(struct module));
 852        strcpy(mod.name, "VPE loader");
 853
 854        hdr = (Elf_Ehdr *) v->pbuffer;
 855        len = v->plen;
 856
 857        /* Sanity checks against insmoding binaries or wrong arch,
 858           weird elf version */
 859        if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0
 860            || (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC)
 861            || !elf_check_arch(hdr)
 862            || hdr->e_shentsize != sizeof(*sechdrs)) {
 863                printk(KERN_WARNING
 864                       "VPE loader: program wrong arch or weird elf version\n");
 865
 866                return -ENOEXEC;
 867        }
 868
 869        if (hdr->e_type == ET_REL)
 870                relocate = 1;
 871
 872        if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) {
 873                printk(KERN_ERR "VPE loader: program length %u truncated\n",
 874                       len);
 875
 876                return -ENOEXEC;
 877        }
 878
 879        /* Convenience variables */
 880        sechdrs = (void *)hdr + hdr->e_shoff;
 881        secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
 882        sechdrs[0].sh_addr = 0;
 883
 884        /* And these should exist, but gcc whinges if we don't init them */
 885        symindex = strindex = 0;
 886
 887        if (relocate) {
 888                for (i = 1; i < hdr->e_shnum; i++) {
 889                        if (sechdrs[i].sh_type != SHT_NOBITS
 890                            && len < sechdrs[i].sh_offset + sechdrs[i].sh_size) {
 891                                printk(KERN_ERR "VPE program length %u truncated\n",
 892                                       len);
 893                                return -ENOEXEC;
 894                        }
 895
 896                        /* Mark all sections sh_addr with their address in the
 897                           temporary image. */
 898                        sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
 899
 900                        /* Internal symbols and strings. */
 901                        if (sechdrs[i].sh_type == SHT_SYMTAB) {
 902                                symindex = i;
 903                                strindex = sechdrs[i].sh_link;
 904                                strtab = (char *)hdr + sechdrs[strindex].sh_offset;
 905                        }
 906                }
 907                layout_sections(&mod, hdr, sechdrs, secstrings);
 908        }
 909
 910        v->load_addr = alloc_progmem(mod.core_size);
 911        if (!v->load_addr)
 912                return -ENOMEM;
 913
 914        pr_info("VPE loader: loading to %p\n", v->load_addr);
 915
 916        if (relocate) {
 917                for (i = 0; i < hdr->e_shnum; i++) {
 918                        void *dest;
 919
 920                        if (!(sechdrs[i].sh_flags & SHF_ALLOC))
 921                                continue;
 922
 923                        dest = v->load_addr + sechdrs[i].sh_entsize;
 924
 925                        if (sechdrs[i].sh_type != SHT_NOBITS)
 926                                memcpy(dest, (void *)sechdrs[i].sh_addr,
 927                                       sechdrs[i].sh_size);
 928                        /* Update sh_addr to point to copy in image. */
 929                        sechdrs[i].sh_addr = (unsigned long)dest;
 930
 931                        printk(KERN_DEBUG " section sh_name %s sh_addr 0x%x\n",
 932                               secstrings + sechdrs[i].sh_name, sechdrs[i].sh_addr);
 933                }
 934
 935                /* Fix up syms, so that st_value is a pointer to location. */
 936                simplify_symbols(sechdrs, symindex, strtab, secstrings,
 937                                 hdr->e_shnum, &mod);
 938
 939                /* Now do relocations. */
 940                for (i = 1; i < hdr->e_shnum; i++) {
 941                        const char *strtab = (char *)sechdrs[strindex].sh_addr;
 942                        unsigned int info = sechdrs[i].sh_info;
 943
 944                        /* Not a valid relocation section? */
 945                        if (info >= hdr->e_shnum)
 946                                continue;
 947
 948                        /* Don't bother with non-allocated sections */
 949                        if (!(sechdrs[info].sh_flags & SHF_ALLOC))
 950                                continue;
 951
 952                        if (sechdrs[i].sh_type == SHT_REL)
 953                                err = apply_relocations(sechdrs, strtab, symindex, i,
 954                                                        &mod);
 955                        else if (sechdrs[i].sh_type == SHT_RELA)
 956                                err = apply_relocate_add(sechdrs, strtab, symindex, i,
 957                                                         &mod);
 958                        if (err < 0)
 959                                return err;
 960
 961                }
 962        } else {
 963                struct elf_phdr *phdr = (struct elf_phdr *) ((char *)hdr + hdr->e_phoff);
 964
 965                for (i = 0; i < hdr->e_phnum; i++) {
 966                        if (phdr->p_type == PT_LOAD) {
 967                                memcpy((void *)phdr->p_paddr,
 968                                       (char *)hdr + phdr->p_offset,
 969                                       phdr->p_filesz);
 970                                memset((void *)phdr->p_paddr + phdr->p_filesz,
 971                                       0, phdr->p_memsz - phdr->p_filesz);
 972                    }
 973                    phdr++;
 974                }
 975
 976                for (i = 0; i < hdr->e_shnum; i++) {
 977                        /* Internal symbols and strings. */
 978                        if (sechdrs[i].sh_type == SHT_SYMTAB) {
 979                                symindex = i;
 980                                strindex = sechdrs[i].sh_link;
 981                                strtab = (char *)hdr + sechdrs[strindex].sh_offset;
 982
 983                                /* mark the symtab's address for when we try to find the
 984                                   magic symbols */
 985                                sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
 986                        }
 987                }
 988        }
 989
 990        /* make sure it's physically written out */
 991        flush_icache_range((unsigned long)v->load_addr,
 992                           (unsigned long)v->load_addr + v->len);
 993
 994        if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
 995                if (v->__start == 0) {
 996                        printk(KERN_WARNING "VPE loader: program does not contain "
 997                               "a __start symbol\n");
 998                        return -ENOEXEC;
 999                }
1000
1001                if (v->shared_ptr == NULL)
1002                        printk(KERN_WARNING "VPE loader: "
1003                               "program does not contain vpe_shared symbol.\n"
1004                               " Unable to use AMVP (AP/SP) facilities.\n");
1005        }
1006
 1007        printk(KERN_INFO "VPE loader: ELF loaded\n");
1008        return 0;
1009}
1010
1011static void cleanup_tc(struct tc *tc)
1012{
1013        unsigned long flags;
1014        unsigned int mtflags, vpflags;
1015        int tmp;
1016
1017        local_irq_save(flags);
1018        mtflags = dmt();
1019        vpflags = dvpe();
1020        /* Put MVPE's into 'configuration state' */
1021        set_c0_mvpcontrol(MVPCONTROL_VPC);
1022
1023        settc(tc->index);
1024        tmp = read_tc_c0_tcstatus();
1025
1026        /* mark not allocated and not dynamically allocatable */
1027        tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
1028        tmp |= TCSTATUS_IXMT;   /* interrupt exempt */
1029        write_tc_c0_tcstatus(tmp);
1030
1031        write_tc_c0_tchalt(TCHALT_H);
1032        mips_ihb();
1033
1034        /* bind it to anything other than VPE1 */
1035//      write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE
1036
1037        clear_c0_mvpcontrol(MVPCONTROL_VPC);
1038        evpe(vpflags);
1039        emt(mtflags);
1040        local_irq_restore(flags);
1041}
1042
1043static int getcwd(char *buff, int size)
1044{
1045        mm_segment_t old_fs;
1046        int ret;
1047
1048        old_fs = get_fs();
1049        set_fs(KERNEL_DS);
1050
1051        ret = sys_getcwd(buff, size);
1052
1053        set_fs(old_fs);
1054
1055        return ret;
1056}
1057
1058/* checks VPE is unused and gets ready to load program  */
1059static int vpe_open(struct inode *inode, struct file *filp)
1060{
1061        enum vpe_state state;
1062        struct vpe_notifications *not;
1063        struct vpe *v;
1064        int ret;
1065
1066        if (minor != iminor(inode)) {
1067                /* assume only 1 device at the moment. */
1068                pr_warning("VPE loader: only vpe1 is supported\n");
1069
1070                return -ENODEV;
1071        }
1072
1073        if ((v = get_vpe(tclimit)) == NULL) {
1074                pr_warning("VPE loader: unable to get vpe\n");
1075
1076                return -ENODEV;
1077        }
1078
1079        state = xchg(&v->state, VPE_STATE_INUSE);
1080        if (state != VPE_STATE_UNUSED) {
 1081                printk(KERN_DEBUG "VPE loader: TC in use, dumping regs\n");
1082
1083                list_for_each_entry(not, &v->notify, list) {
1084                        not->stop(tclimit);
1085                }
1086
1087                release_progmem(v->load_addr);
1088                cleanup_tc(get_tc(tclimit));
1089        }
1090
 1091        /* this of course trashes what was there before... */
1092        v->pbuffer = vmalloc(P_SIZE);
1093        if (!v->pbuffer) {
1094                pr_warning("VPE loader: unable to allocate memory\n");
1095                return -ENOMEM;
1096        }
1097        v->plen = P_SIZE;
1098        v->load_addr = NULL;
1099        v->len = 0;
1100
1101        v->uid = filp->f_cred->fsuid;
1102        v->gid = filp->f_cred->fsgid;
1103
1104#ifdef CONFIG_MIPS_APSP_KSPD
1105        /* get kspd to tell us when a syscall_exit happens */
1106        if (!kspd_events_reqd) {
1107                kspd_notify(&kspd_events);
1108                kspd_events_reqd++;
1109        }
1110#endif
1111
1112        v->cwd[0] = 0;
1113        ret = getcwd(v->cwd, VPE_PATH_MAX);
1114        if (ret < 0)
1115                printk(KERN_WARNING "VPE loader: open, getcwd returned %d\n", ret);
1116
1117        v->shared_ptr = NULL;
1118        v->__start = 0;
1119
1120        return 0;
1121}
1122
1123static int vpe_release(struct inode *inode, struct file *filp)
1124{
1125        struct vpe *v;
1126        Elf_Ehdr *hdr;
1127        int ret = 0;
1128
1129        v = get_vpe(tclimit);
1130        if (v == NULL)
1131                return -ENODEV;
1132
1133        hdr = (Elf_Ehdr *) v->pbuffer;
1134        if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) {
1135                if (vpe_elfload(v) >= 0) {
1136                        vpe_run(v);
1137                } else {
1138                        printk(KERN_WARNING "VPE loader: ELF load failed.\n");
1139                        ret = -ENOEXEC;
1140                }
1141        } else {
1142                printk(KERN_WARNING "VPE loader: only elf files are supported\n");
1143                ret = -ENOEXEC;
1144        }
1145
 1146        /* It's good to be able to run the SP and, if it chokes, have a look at
 1147           /dev/rt?. But if we reset the pointer to the shared struct we lose
 1148           what has happened. So perhaps if garbage is sent to the vpe device,
 1149           use that as a trigger for the reset. Hopefully a nice executable will
 1150           be along shortly. */
1151        if (ret < 0)
1152                v->shared_ptr = NULL;
1153
1154        vfree(v->pbuffer);
1155        v->plen = 0;
1156
1157        return ret;
1158}
1159
1160static ssize_t vpe_write(struct file *file, const char __user * buffer,
1161                         size_t count, loff_t * ppos)
1162{
1163        size_t ret = count;
1164        struct vpe *v;
1165
1166        if (iminor(file->f_path.dentry->d_inode) != minor)
1167                return -ENODEV;
1168
1169        v = get_vpe(tclimit);
1170        if (v == NULL)
1171                return -ENODEV;
1172
1173        if ((count + v->len) > v->plen) {
1174                printk(KERN_WARNING
 1175                       "VPE loader: ELF size too big. Perhaps strip unneeded symbols\n");
1176                return -ENOMEM;
1177        }
1178
1179        count -= copy_from_user(v->pbuffer + v->len, buffer, count);
1180        if (!count)
1181                return -EFAULT;
1182
1183        v->len += count;
1184        return ret;
1185}
1186
1187static const struct file_operations vpe_fops = {
1188        .owner = THIS_MODULE,
1189        .open = vpe_open,
1190        .release = vpe_release,
1191        .write = vpe_write,
1192        .llseek = noop_llseek,
1193};
1194
1195/* module wrapper entry points */
1196/* give me a vpe */
1197vpe_handle vpe_alloc(void)
1198{
1199        int i;
1200        struct vpe *v;
1201
1202        /* find a vpe */
1203        for (i = 1; i < MAX_VPES; i++) {
1204                if ((v = get_vpe(i)) != NULL) {
1205                        v->state = VPE_STATE_INUSE;
1206                        return v;
1207                }
1208        }
1209        return NULL;
1210}
1211
1212EXPORT_SYMBOL(vpe_alloc);
1213
1214/* start running from here */
1215int vpe_start(vpe_handle vpe, unsigned long start)
1216{
1217        struct vpe *v = vpe;
1218
1219        v->__start = start;
1220        return vpe_run(v);
1221}
1222
1223EXPORT_SYMBOL(vpe_start);
1224
1225/* halt it for now */
1226int vpe_stop(vpe_handle vpe)
1227{
1228        struct vpe *v = vpe;
1229        struct tc *t;
1230        unsigned int evpe_flags;
1231
1232        evpe_flags = dvpe();
1233
 1234        if (!list_empty(&v->tc)) {
 1235                t = list_entry(v->tc.next, struct tc, tc);
1236                settc(t->index);
1237                write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
1238        }
1239
1240        evpe(evpe_flags);
1241
1242        return 0;
1243}
1244
1245EXPORT_SYMBOL(vpe_stop);
1246
 1247/* I'm done with it, thank you */
1248int vpe_free(vpe_handle vpe)
1249{
1250        struct vpe *v = vpe;
1251        struct tc *t;
1252        unsigned int evpe_flags;
1253
 1254        if (list_empty(&v->tc))
 1255                return -ENOEXEC;
 1256        t = list_entry(v->tc.next, struct tc, tc);
1257
1258        evpe_flags = dvpe();
1259
1260        /* Put MVPE's into 'configuration state' */
1261        set_c0_mvpcontrol(MVPCONTROL_VPC);
1262
1263        settc(t->index);
1264        write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
1265
1266        /* halt the TC */
1267        write_tc_c0_tchalt(TCHALT_H);
1268        mips_ihb();
1269
1270        /* mark the TC unallocated */
1271        write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A);
1272
1273        v->state = VPE_STATE_UNUSED;
1274
1275        clear_c0_mvpcontrol(MVPCONTROL_VPC);
1276        evpe(evpe_flags);
1277
1278        return 0;
1279}
1280
1281EXPORT_SYMBOL(vpe_free);
1282
1283void *vpe_get_shared(int index)
1284{
1285        struct vpe *v;
1286
1287        if ((v = get_vpe(index)) == NULL)
1288                return NULL;
1289
1290        return v->shared_ptr;
1291}
1292
1293EXPORT_SYMBOL(vpe_get_shared);
1294
1295int vpe_getuid(int index)
1296{
1297        struct vpe *v;
1298
1299        if ((v = get_vpe(index)) == NULL)
1300                return -1;
1301
1302        return v->uid;
1303}
1304
1305EXPORT_SYMBOL(vpe_getuid);
1306
1307int vpe_getgid(int index)
1308{
1309        struct vpe *v;
1310
1311        if ((v = get_vpe(index)) == NULL)
1312                return -1;
1313
1314        return v->gid;
1315}
1316
1317EXPORT_SYMBOL(vpe_getgid);
1318
1319int vpe_notify(int index, struct vpe_notifications *notify)
1320{
1321        struct vpe *v;
1322
1323        if ((v = get_vpe(index)) == NULL)
1324                return -1;
1325
1326        list_add(&notify->list, &v->notify);
1327        return 0;
1328}
1329
1330EXPORT_SYMBOL(vpe_notify);
1331
1332char *vpe_getcwd(int index)
1333{
1334        struct vpe *v;
1335
1336        if ((v = get_vpe(index)) == NULL)
1337                return NULL;
1338
1339        return v->cwd;
1340}
1341
1342EXPORT_SYMBOL(vpe_getcwd);
1343
1344#ifdef CONFIG_MIPS_APSP_KSPD
1345static void kspd_sp_exit( int sp_id)
1346{
1347        cleanup_tc(get_tc(sp_id));
1348}
1349#endif
1350
1351static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
1352                          const char *buf, size_t len)
1353{
1354        struct vpe *vpe = get_vpe(tclimit);
1355        struct vpe_notifications *not;
1356
1357        list_for_each_entry(not, &vpe->notify, list) {
1358                not->stop(tclimit);
1359        }
1360
1361        release_progmem(vpe->load_addr);
1362        cleanup_tc(get_tc(tclimit));
1363        vpe_stop(vpe);
1364        vpe_free(vpe);
1365
1366        return len;
1367}
1368
1369static ssize_t show_ntcs(struct device *cd, struct device_attribute *attr,
1370                         char *buf)
1371{
1372        struct vpe *vpe = get_vpe(tclimit);
1373
1374        return sprintf(buf, "%d\n", vpe->ntcs);
1375}
1376
1377static ssize_t store_ntcs(struct device *dev, struct device_attribute *attr,
1378                          const char *buf, size_t len)
1379{
1380        struct vpe *vpe = get_vpe(tclimit);
1381        unsigned long new;
1382        char *endp;
1383
1384        new = simple_strtoul(buf, &endp, 0);
1385        if (endp == buf)
1386                goto out_einval;
1387
1388        if (new == 0 || new > (hw_tcs - tclimit))
1389                goto out_einval;
1390
1391        vpe->ntcs = new;
1392
1393        return len;
1394
1395out_einval:
1396        return -EINVAL;
1397}
1398
1399static struct device_attribute vpe_class_attributes[] = {
1400        __ATTR(kill, S_IWUSR, NULL, store_kill),
1401        __ATTR(ntcs, S_IRUGO | S_IWUSR, show_ntcs, store_ntcs),
1402        {}
1403};
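    /*
     * With the "vpe" class and the "vpe1" device registered below, these
     * attributes are expected to appear as /sys/class/vpe/vpe1/kill and
     * /sys/class/vpe/vpe1/ntcs (paths assume the default sysfs layout).
     */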
1404
1405static void vpe_device_release(struct device *cd)
1406{
1407        kfree(cd);
1408}
1409
1410struct class vpe_class = {
1411        .name = "vpe",
1412        .owner = THIS_MODULE,
1413        .dev_release = vpe_device_release,
1414        .dev_attrs = vpe_class_attributes,
1415};
1416
1417struct device vpe_device;
1418
1419static int __init vpe_module_init(void)
1420{
1421        unsigned int mtflags, vpflags;
1422        unsigned long flags, val;
1423        struct vpe *v = NULL;
1424        struct tc *t;
1425        int tc, err;
1426
1427        if (!cpu_has_mipsmt) {
 1428                printk(KERN_WARNING "VPE loader: not a MIPS MT capable processor\n");
1429                return -ENODEV;
1430        }
1431
1432        if (vpelimit == 0) {
 1433                printk(KERN_WARNING "No VPEs reserved for AP/SP, not "
 1434                       "initializing VPE loader.\nPass maxvpes=<n> as a "
 1435                       "kernel argument\n");
1436
1437                return -ENODEV;
1438        }
1439
1440        if (tclimit == 0) {
 1441                printk(KERN_WARNING "No TCs reserved for AP/SP, not "
 1442                       "initializing VPE loader.\nPass maxtcs=<n> as a "
 1443                       "kernel argument\n");
1444
1445                return -ENODEV;
1446        }
1447
1448        major = register_chrdev(0, module_name, &vpe_fops);
1449        if (major < 0) {
 1450                printk(KERN_WARNING "VPE loader: unable to register character device\n");
1451                return major;
1452        }
1453
1454        err = class_register(&vpe_class);
1455        if (err) {
1456                printk(KERN_ERR "vpe_class registration failed\n");
1457                goto out_chrdev;
1458        }
1459
1460        device_initialize(&vpe_device);
 1461        vpe_device.class        = &vpe_class;
 1462        vpe_device.parent       = NULL;
1463        dev_set_name(&vpe_device, "vpe1");
1464        vpe_device.devt = MKDEV(major, minor);
1465        err = device_add(&vpe_device);
1466        if (err) {
1467                printk(KERN_ERR "Adding vpe_device failed\n");
1468                goto out_class;
1469        }
1470
1471        local_irq_save(flags);
1472        mtflags = dmt();
1473        vpflags = dvpe();
1474
1475        /* Put MVPE's into 'configuration state' */
1476        set_c0_mvpcontrol(MVPCONTROL_VPC);
1477
1478        /* dump_mtregs(); */
1479
1480        val = read_c0_mvpconf0();
1481        hw_tcs = (val & MVPCONF0_PTC) + 1;
1482        hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
1483
1484        for (tc = tclimit; tc < hw_tcs; tc++) {
1485                /*
 1486                 * Must re-enable multithreading temporarily, otherwise if we
 1487                 * reschedule and send IPIs or similar we might hang.
1488                 */
1489                clear_c0_mvpcontrol(MVPCONTROL_VPC);
1490                evpe(vpflags);
1491                emt(mtflags);
1492                local_irq_restore(flags);
1493                t = alloc_tc(tc);
1494                if (!t) {
1495                        err = -ENOMEM;
1496                        goto out;
1497                }
1498
1499                local_irq_save(flags);
1500                mtflags = dmt();
1501                vpflags = dvpe();
1502                set_c0_mvpcontrol(MVPCONTROL_VPC);
1503
1504                /* VPE's */
1505                if (tc < hw_tcs) {
1506                        settc(tc);
1507
1508                        if ((v = alloc_vpe(tc)) == NULL) {
1509                                printk(KERN_WARNING "VPE: unable to allocate VPE\n");
1510
1511                                goto out_reenable;
1512                        }
1513
1514                        v->ntcs = hw_tcs - tclimit;
1515
1516                        /* add the tc to the list of this vpe's tc's. */
1517                        list_add(&t->tc, &v->tc);
1518
1519                        /* deactivate all but vpe0 */
1520                        if (tc >= tclimit) {
1521                                unsigned long tmp = read_vpe_c0_vpeconf0();
1522
1523                                tmp &= ~VPECONF0_VPA;
1524
1525                                /* master VPE */
1526                                tmp |= VPECONF0_MVP;
1527                                write_vpe_c0_vpeconf0(tmp);
1528                        }
1529
1530                        /* disable multi-threading with TC's */
1531                        write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
1532
1533                        if (tc >= vpelimit) {
1534                                /*
1535                                 * Set config to be the same as vpe0,
1536                                 * particularly kseg0 coherency alg
1537                                 */
1538                                write_vpe_c0_config(read_c0_config());
1539                        }
1540                }
1541
1542                /* TC's */
1543                t->pvpe = v;    /* set the parent vpe */
1544
1545                if (tc >= tclimit) {
1546                        unsigned long tmp;
1547
1548                        settc(tc);
1549
1550                        /* Any TC that is bound to VPE0 gets left as is - in case
1551                           we are running SMTC on VPE0. A TC that is bound to any
1552                           other VPE gets bound to VPE0, ideally I'd like to make
1553                           it homeless but it doesn't appear to let me bind a TC
1554                           to a non-existent VPE. Which is perfectly reasonable.
1555
1556                           The (un)bound state is visible to an EJTAG probe so may
1557                           notify GDB...
1558                        */
1559
1560                        if (((tmp = read_tc_c0_tcbind()) & TCBIND_CURVPE)) {
 1561                                /* tc is bound to a VPE other than vpe0 */
1562                                write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE);
1563
1564                                t->pvpe = get_vpe(0);   /* set the parent vpe */
1565                        }
1566
1567                        /* halt the TC */
1568                        write_tc_c0_tchalt(TCHALT_H);
1569                        mips_ihb();
1570
1571                        tmp = read_tc_c0_tcstatus();
1572
1573                        /* mark not activated and not dynamically allocatable */
1574                        tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
1575                        tmp |= TCSTATUS_IXMT;   /* interrupt exempt */
1576                        write_tc_c0_tcstatus(tmp);
1577                }
1578        }
1579
1580out_reenable:
1581        /* release config state */
1582        clear_c0_mvpcontrol(MVPCONTROL_VPC);
1583
1584        evpe(vpflags);
1585        emt(mtflags);
1586        local_irq_restore(flags);
1587
1588#ifdef CONFIG_MIPS_APSP_KSPD
1589        kspd_events.kspd_sp_exit = kspd_sp_exit;
1590#endif
1591        return 0;
1592
1593out_class:
1594        class_unregister(&vpe_class);
1595out_chrdev:
1596        unregister_chrdev(major, module_name);
1597
1598out:
1599        return err;
1600}
1601
1602static void __exit vpe_module_exit(void)
1603{
1604        struct vpe *v, *n;
1605
1606        device_del(&vpe_device);
1607        unregister_chrdev(major, module_name);
1608
1609        /* No locking needed here */
1610        list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
1611                if (v->state != VPE_STATE_UNUSED)
1612                        release_vpe(v);
1613        }
1614}
1615
1616module_init(vpe_module_init);
1617module_exit(vpe_module_exit);
1618MODULE_DESCRIPTION("MIPS VPE Loader");
1619MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
1620MODULE_LICENSE("GPL");
1621