linux/arch/mips/kernel/vpe.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
 * Copyright (C) 2013 Imagination Technologies Ltd.
 *
 * VPE support module for loading a MIPS SP program into VPE1. The SP
 * environment is rather simple since there are no TLBs. It needs
 * to be relocatable (or partially linked). Initialize your stack in
 * the startup-code. The loader looks for the symbol __start and sets
 * up the execution to resume from there. To load and run, simply do
 * a cat SP 'binary' to the /dev/vpe1 device.
 */
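
/*
 * Illustrative usage only, not part of the original source: assuming the SP
 * image is a small relocatable ELF called "sp_prog.elf" (hypothetical name)
 * and /dev/vpe1 has been created for this driver, loading amounts to
 *
 *         cat sp_prog.elf > /dev/vpe1
 *
 * The image is parsed, relocated and started when the descriptor is closed
 * (see vpe_release() below).
 */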
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/moduleloader.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/memblock.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/cacheflush.h>
#include <linux/atomic.h>
#include <asm/mips_mt.h>
#include <asm/processor.h>
#include <asm/vpe.h>

#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif

/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))

struct vpe_control vpecontrol = {
        .vpe_list_lock  = __SPIN_LOCK_UNLOCKED(vpe_list_lock),
        .vpe_list       = LIST_HEAD_INIT(vpecontrol.vpe_list),
        .tc_list_lock   = __SPIN_LOCK_UNLOCKED(tc_list_lock),
        .tc_list        = LIST_HEAD_INIT(vpecontrol.tc_list)
};

/* get the vpe associated with this minor */
struct vpe *get_vpe(int minor)
{
        struct vpe *res, *v;

        if (!cpu_has_mipsmt)
                return NULL;

        res = NULL;
        spin_lock(&vpecontrol.vpe_list_lock);
        list_for_each_entry(v, &vpecontrol.vpe_list, list) {
                if (v->minor == VPE_MODULE_MINOR) {
                        res = v;
                        break;
                }
        }
        spin_unlock(&vpecontrol.vpe_list_lock);

        return res;
}

/* get the tc associated with this index */
struct tc *get_tc(int index)
{
        struct tc *res, *t;

        res = NULL;
        spin_lock(&vpecontrol.tc_list_lock);
        list_for_each_entry(t, &vpecontrol.tc_list, list) {
                if (t->index == index) {
                        res = t;
                        break;
                }
        }
        spin_unlock(&vpecontrol.tc_list_lock);

        return res;
}

/* allocate a vpe and associate it with this minor (or index) */
struct vpe *alloc_vpe(int minor)
{
        struct vpe *v;

        v = kzalloc(sizeof(struct vpe), GFP_KERNEL);
        if (v == NULL)
                goto out;

        INIT_LIST_HEAD(&v->tc);
        spin_lock(&vpecontrol.vpe_list_lock);
        list_add_tail(&v->list, &vpecontrol.vpe_list);
        spin_unlock(&vpecontrol.vpe_list_lock);

        INIT_LIST_HEAD(&v->notify);
        v->minor = VPE_MODULE_MINOR;

out:
        return v;
}

/* allocate a tc. At startup only tc0 is running, all others can be halted. */
struct tc *alloc_tc(int index)
{
        struct tc *tc;

        tc = kzalloc(sizeof(struct tc), GFP_KERNEL);
        if (tc == NULL)
                goto out;

        INIT_LIST_HEAD(&tc->tc);
        tc->index = index;

        spin_lock(&vpecontrol.tc_list_lock);
        list_add_tail(&tc->list, &vpecontrol.tc_list);
        spin_unlock(&vpecontrol.tc_list_lock);

out:
        return tc;
}

/* clean up and free everything */
void release_vpe(struct vpe *v)
{
        list_del(&v->list);
        if (v->load_addr)
                release_progmem(v);
        kfree(v);
}

/* Find some VPE program space */
void *alloc_progmem(unsigned long len)
{
        void *addr;

#ifdef CONFIG_MIPS_VPE_LOADER_TOM
        /*
         * This means you must tell Linux to use less memory than you
         * physically have, for example by passing a mem= boot argument.
         */
        addr = pfn_to_kaddr(max_low_pfn);
        memset(addr, 0, len);
#else
        /* simply grab some mem for now */
        addr = kzalloc(len, GFP_KERNEL);
#endif

        return addr;
}
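
/*
 * Illustrative note, not part of the original source: with
 * CONFIG_MIPS_VPE_LOADER_TOM the program lives just above the memory Linux
 * was told to use. On a hypothetical 128 MB board, booting with "mem=112M"
 * leaves the top 16 MB for the SP program, and pfn_to_kaddr(max_low_pfn)
 * roughly points at the start of that reserved region.
 */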

void release_progmem(void *ptr)
{
#ifndef CONFIG_MIPS_VPE_LOADER_TOM
        kfree(ptr);
#endif
}

/* Update size with this section: return offset. */
static long get_offset(unsigned long *size, Elf_Shdr *sechdr)
{
        long ret;

        ret = ALIGN(*size, sechdr->sh_addralign ? : 1);
        *size = ret + sechdr->sh_size;
        return ret;
}
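
/*
 * Worked example (illustrative, not from the original source): if *size is
 * currently 0x0c and the incoming section has sh_addralign == 8 and
 * sh_size == 0x20, ALIGN() rounds the running size up to 0x10, that value is
 * returned as the section's offset, and *size becomes 0x30.
 */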

/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
   might -- code, read-only data, read-write data, small data.  Tally
   sizes, and place the offsets into sh_entsize fields: high bit means it
   belongs in init. */
static void layout_sections(struct module *mod, const Elf_Ehdr *hdr,
                            Elf_Shdr *sechdrs, const char *secstrings)
{
        static unsigned long const masks[][2] = {
                /* NOTE: all executable code must be the first section
                 * in this array; otherwise modify the text_size
                 * finder in the two loops below */
                {SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL},
                {SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL},
                {SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL},
                {ARCH_SHF_SMALL | SHF_ALLOC, 0}
        };
        unsigned int m, i;

        for (i = 0; i < hdr->e_shnum; i++)
                sechdrs[i].sh_entsize = ~0UL;

        for (m = 0; m < ARRAY_SIZE(masks); ++m) {
                for (i = 0; i < hdr->e_shnum; ++i) {
                        Elf_Shdr *s = &sechdrs[i];

                        if ((s->sh_flags & masks[m][0]) != masks[m][0]
                            || (s->sh_flags & masks[m][1])
                            || s->sh_entsize != ~0UL)
                                continue;
                        s->sh_entsize =
                                get_offset((unsigned long *)&mod->core_layout.size, s);
                }

                if (m == 0)
                        mod->core_layout.text_size = mod->core_layout.size;

        }
}
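
/*
 * Worked example (illustrative, not from the original source): given the mask
 * table above, an image whose SHF_ALLOC sections are .text (SHF_EXECINSTR),
 * .rodata, .data (SHF_WRITE) and a small-data .sbss (ARCH_SHF_SMALL) is
 * packed in exactly that order. After the m == 0 pass only the executable
 * sections have been placed, so core_layout.text_size covers just the code.
 */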

/* from module-elf32.c, but subverted a little */

struct mips_hi16 {
        struct mips_hi16 *next;
        Elf32_Addr *addr;
        Elf32_Addr value;
};

static struct mips_hi16 *mips_hi16_list;
static unsigned int gp_offs, gp_addr;

static int apply_r_mips_none(struct module *me, uint32_t *location,
                             Elf32_Addr v)
{
        return 0;
}

static int apply_r_mips_gprel16(struct module *me, uint32_t *location,
                                Elf32_Addr v)
{
        int rel;

        if (!(*location & 0xffff)) {
                rel = (int)v - gp_addr;
        } else {
                /* .sbss + gp(relative) + offset */
                /* kludge! */
                rel =  (int)(short)((int)v + gp_offs +
                                    (int)(short)(*location & 0xffff) - gp_addr);
        }

        if ((rel > 32768) || (rel < -32768)) {
                pr_debug("VPE loader: apply_r_mips_gprel16: relative address 0x%x out of range of gp register\n",
                         rel);
                return -ENOEXEC;
        }

        *location = (*location & 0xffff0000) | (rel & 0xffff);

        return 0;
}

static int apply_r_mips_pc16(struct module *me, uint32_t *location,
                             Elf32_Addr v)
{
        int rel;
        rel = (((unsigned int)v - (unsigned int)location));
        rel >>= 2; /* because the offset is in _instructions_ not bytes. */
        rel -= 1;  /* and one instruction less due to the branch delay slot. */

        if ((rel > 32768) || (rel < -32768)) {
                pr_debug("VPE loader: apply_r_mips_pc16: relative address out of range 0x%x\n",
                         rel);
                return -ENOEXEC;
        }

        *location = (*location & 0xffff0000) | (rel & 0xffff);

        return 0;
}

static int apply_r_mips_32(struct module *me, uint32_t *location,
                           Elf32_Addr v)
{
        *location += v;

        return 0;
}

static int apply_r_mips_26(struct module *me, uint32_t *location,
                           Elf32_Addr v)
{
        if (v % 4) {
                pr_debug("VPE loader: apply_r_mips_26: unaligned relocation\n");
                return -ENOEXEC;
        }

/*
 * Not desperately convinced this is a good check of an overflow condition
 * anyway. But it gets in the way of handling undefined weak symbols which
 * we want to set to zero.
 * if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
 * printk(KERN_ERR
 * "module %s: relocation overflow\n",
 * me->name);
 * return -ENOEXEC;
 * }
 */

        *location = (*location & ~0x03ffffff) |
                ((*location + (v >> 2)) & 0x03ffffff);
        return 0;
}
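
/*
 * Worked example (illustrative, not from the original source): for a plain
 * "jal sym" the 26-bit field starts at 0 and sym resolves to v. With a
 * hypothetical v == 0x00400010 the patched field becomes
 * (0 + (v >> 2)) & 0x03ffffff == 0x00100004; at run time the CPU rebuilds
 * the target as ((PC + 4) & 0xf0000000) | (field << 2) == 0x00400010.
 */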

static int apply_r_mips_hi16(struct module *me, uint32_t *location,
                             Elf32_Addr v)
{
        struct mips_hi16 *n;

        /*
         * We cannot relocate this one now because we don't know the value of
         * the carry we need to add.  Save the information, and let LO16 do the
         * actual relocation.
         */
        n = kmalloc(sizeof(*n), GFP_KERNEL);
        if (!n)
                return -ENOMEM;

        n->addr = location;
        n->value = v;
        n->next = mips_hi16_list;
        mips_hi16_list = n;

        return 0;
}

static int apply_r_mips_lo16(struct module *me, uint32_t *location,
                             Elf32_Addr v)
{
        unsigned long insnlo = *location;
        Elf32_Addr val, vallo;
        struct mips_hi16 *l, *next;

        /* Sign extend the addend we extract from the lo insn.  */
        vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;

        if (mips_hi16_list != NULL) {

                l = mips_hi16_list;
                while (l != NULL) {
                        unsigned long insn;

                        /*
                         * The value for the HI16 had best be the same.
                         */
                        if (v != l->value) {
                                pr_debug("VPE loader: apply_r_mips_lo16/hi16: inconsistent value information\n");
                                goto out_free;
                        }

                        /*
                         * Do the HI16 relocation.  Note that we actually don't
                         * need to know anything about the LO16 itself, except
                         * where to find the low 16 bits of the addend needed
                         * by the LO16.
                         */
                        insn = *l->addr;
                        val = ((insn & 0xffff) << 16) + vallo;
                        val += v;

                        /*
                         * Account for the sign extension that will happen in
                         * the low bits.
                         */
                        val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff;

                        insn = (insn & ~0xffff) | val;
                        *l->addr = insn;

                        next = l->next;
                        kfree(l);
                        l = next;
                }

                mips_hi16_list = NULL;
        }

        /*
         * Ok, we're done with the HI16 relocs.  Now deal with the LO16.
         */
        val = v + vallo;
        insnlo = (insnlo & ~0xffff) | (val & 0xffff);
        *location = insnlo;

        return 0;

out_free:
        while (l != NULL) {
                next = l->next;
                kfree(l);
                l = next;
        }
        mips_hi16_list = NULL;

        return -ENOEXEC;
}
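
/*
 * Illustrative note, not from the original source: the "+ ((val & 0x8000) != 0)"
 * step above compensates for the sign extension addiu applies to the LO16
 * half. For a final value of 0x00018000 the pair is written as HI16 = 0x0002
 * and LO16 = 0x8000: lui yields 0x00020000, addiu then adds the sign-extended
 * 0xffff8000, and the result is 0x00018000 again.
 */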

static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
                                Elf32_Addr v) = {
        [R_MIPS_NONE]   = apply_r_mips_none,
        [R_MIPS_32]     = apply_r_mips_32,
        [R_MIPS_26]     = apply_r_mips_26,
        [R_MIPS_HI16]   = apply_r_mips_hi16,
        [R_MIPS_LO16]   = apply_r_mips_lo16,
        [R_MIPS_GPREL16] = apply_r_mips_gprel16,
        [R_MIPS_PC16] = apply_r_mips_pc16
};

static char *rstrs[] = {
        [R_MIPS_NONE]   = "MIPS_NONE",
        [R_MIPS_32]     = "MIPS_32",
        [R_MIPS_26]     = "MIPS_26",
        [R_MIPS_HI16]   = "MIPS_HI16",
        [R_MIPS_LO16]   = "MIPS_LO16",
        [R_MIPS_GPREL16] = "MIPS_GPREL16",
        [R_MIPS_PC16] = "MIPS_PC16"
};

static int apply_relocations(Elf32_Shdr *sechdrs,
                      const char *strtab,
                      unsigned int symindex,
                      unsigned int relsec,
                      struct module *me)
{
        Elf32_Rel *rel = (void *) sechdrs[relsec].sh_addr;
        Elf32_Sym *sym;
        uint32_t *location;
        unsigned int i;
        Elf32_Addr v;
        int res;

        for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
                Elf32_Word r_info = rel[i].r_info;

                /* This is where to make the change */
                location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
                        + rel[i].r_offset;
                /* This is the symbol it is referring to */
                sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
                        + ELF32_R_SYM(r_info);

                if (!sym->st_value) {
                        pr_debug("%s: undefined weak symbol %s\n",
                                 me->name, strtab + sym->st_name);
                        /* just print the warning, don't barf */
                }

                v = sym->st_value;

                res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v);
                if (res) {
                        char *r = rstrs[ELF32_R_TYPE(r_info)];
                        pr_warn("VPE loader: .text+0x%x relocation type %s for symbol \"%s\" failed\n",
                                rel[i].r_offset, r ? r : "UNKNOWN",
                                strtab + sym->st_name);
                        return res;
                }
        }

        return 0;
}

static inline void save_gp_address(unsigned int secbase, unsigned int rel)
{
        gp_addr = secbase + rel;
        gp_offs = gp_addr - (secbase & 0xffff0000);
}
/* end module-elf32.c */

/* Change all symbols so that st_value encodes the pointer directly. */
static void simplify_symbols(Elf_Shdr *sechdrs,
                            unsigned int symindex,
                            const char *strtab,
                            const char *secstrings,
                            unsigned int nsecs, struct module *mod)
{
        Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
        unsigned long secbase, bssbase = 0;
        unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
        int size;

        /* find the .bss section for COMMON symbols */
        for (i = 0; i < nsecs; i++) {
                if (strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) == 0) {
                        bssbase = sechdrs[i].sh_addr;
                        break;
                }
        }

        for (i = 1; i < n; i++) {
                switch (sym[i].st_shndx) {
                case SHN_COMMON:
                        /* Allocate space for the symbol in the .bss section.
                           st_value is currently size.
                           We want it to have the address of the symbol. */

                        size = sym[i].st_value;
                        sym[i].st_value = bssbase;

                        bssbase += size;
                        break;

                case SHN_ABS:
                        /* Don't need to do anything */
                        break;

                case SHN_UNDEF:
                        /* ret = -ENOENT; */
                        break;

                case SHN_MIPS_SCOMMON:
                        pr_debug("simplify_symbols: ignoring SHN_MIPS_SCOMMON symbol <%s> st_shndx %d\n",
                                 strtab + sym[i].st_name, sym[i].st_shndx);
                        /* .sbss section */
                        break;

                default:
                        secbase = sechdrs[sym[i].st_shndx].sh_addr;

                        if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0)
                                save_gp_address(secbase, sym[i].st_value);

                        sym[i].st_value += secbase;
                        break;
                }
        }
}

#ifdef DEBUG_ELFLOADER
static void dump_elfsymbols(Elf_Shdr *sechdrs, unsigned int symindex,
                            const char *strtab, struct module *mod)
{
        Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
        unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);

        pr_debug("dump_elfsymbols: n %d\n", n);
        for (i = 1; i < n; i++) {
                pr_debug(" i %d name <%s> 0x%x\n", i, strtab + sym[i].st_name,
                         sym[i].st_value);
        }
}
#endif

static int find_vpe_symbols(struct vpe *v, Elf_Shdr *sechdrs,
                                      unsigned int symindex, const char *strtab,
                                      struct module *mod)
{
        Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
        unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);

        for (i = 1; i < n; i++) {
                if (strcmp(strtab + sym[i].st_name, "__start") == 0)
                        v->__start = sym[i].st_value;

                if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0)
                        v->shared_ptr = (void *)sym[i].st_value;
        }

        if ((v->__start == 0) || (v->shared_ptr == NULL))
                return -1;

        return 0;
}
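
/*
 * Illustrative sketch, not part of the original source: the magic symbols
 * find_vpe_symbols() looks for are __start (the SP entry point, mandatory)
 * and vpe_shared (a pointer used for AP/SP communication; if it is missing
 * only a warning is printed later). A minimal SP image could therefore
 * define:
 *
 *         void *vpe_shared;
 *
 *         void __start(void)
 *         {
 *                 for (;;)
 *                         ;
 *         }
 */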

/*
 * Allocates a VPE with some program code space (the load address), copies the
 * contents of the program (p)buffer, performing relocations etc., and frees it
 * when finished.
 */
static int vpe_elfload(struct vpe *v)
{
        Elf_Ehdr *hdr;
        Elf_Shdr *sechdrs;
        long err = 0;
        char *secstrings, *strtab = NULL;
        unsigned int len, i, symindex = 0, strindex = 0, relocate = 0;
        struct module mod; /* so we can re-use the relocations code */

        memset(&mod, 0, sizeof(struct module));
        strcpy(mod.name, "VPE loader");

        hdr = (Elf_Ehdr *) v->pbuffer;
        len = v->plen;

        /* Sanity checks against insmoding binaries or wrong arch,
           weird elf version */
        if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0
            || (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC)
            || !elf_check_arch(hdr)
            || hdr->e_shentsize != sizeof(*sechdrs)) {
                pr_warn("VPE loader: program wrong arch or weird elf version\n");

                return -ENOEXEC;
        }

        if (hdr->e_type == ET_REL)
                relocate = 1;

        if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) {
                pr_err("VPE loader: program length %u truncated\n", len);

                return -ENOEXEC;
        }

        /* Convenience variables */
        sechdrs = (void *)hdr + hdr->e_shoff;
        secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
        sechdrs[0].sh_addr = 0;

        /* And these should exist, but gcc whinges if we don't init them */
        symindex = strindex = 0;

        if (relocate) {
                for (i = 1; i < hdr->e_shnum; i++) {
                        if ((sechdrs[i].sh_type != SHT_NOBITS) &&
                            (len < sechdrs[i].sh_offset + sechdrs[i].sh_size)) {
                                pr_err("VPE program length %u truncated\n",
                                       len);
                                return -ENOEXEC;
                        }

                        /* Mark all sections sh_addr with their address in the
                           temporary image. */
                        sechdrs[i].sh_addr = (size_t) hdr +
                                sechdrs[i].sh_offset;

                        /* Internal symbols and strings. */
                        if (sechdrs[i].sh_type == SHT_SYMTAB) {
                                symindex = i;
                                strindex = sechdrs[i].sh_link;
                                strtab = (char *)hdr +
                                        sechdrs[strindex].sh_offset;
                        }
                }
                layout_sections(&mod, hdr, sechdrs, secstrings);
        }

        v->load_addr = alloc_progmem(mod.core_layout.size);
        if (!v->load_addr)
                return -ENOMEM;

        pr_info("VPE loader: loading to %p\n", v->load_addr);

        if (relocate) {
                for (i = 0; i < hdr->e_shnum; i++) {
                        void *dest;

                        if (!(sechdrs[i].sh_flags & SHF_ALLOC))
                                continue;

                        dest = v->load_addr + sechdrs[i].sh_entsize;

                        if (sechdrs[i].sh_type != SHT_NOBITS)
                                memcpy(dest, (void *)sechdrs[i].sh_addr,
                                       sechdrs[i].sh_size);
                        /* Update sh_addr to point to copy in image. */
                        sechdrs[i].sh_addr = (unsigned long)dest;

                        pr_debug(" section sh_name %s sh_addr 0x%x\n",
                                 secstrings + sechdrs[i].sh_name,
                                 sechdrs[i].sh_addr);
                }

                /* Fix up syms, so that st_value is a pointer to location. */
                simplify_symbols(sechdrs, symindex, strtab, secstrings,
                                 hdr->e_shnum, &mod);

                /* Now do relocations. */
                for (i = 1; i < hdr->e_shnum; i++) {
                        const char *strtab = (char *)sechdrs[strindex].sh_addr;
                        unsigned int info = sechdrs[i].sh_info;

                        /* Not a valid relocation section? */
                        if (info >= hdr->e_shnum)
                                continue;

                        /* Don't bother with non-allocated sections */
                        if (!(sechdrs[info].sh_flags & SHF_ALLOC))
                                continue;

                        if (sechdrs[i].sh_type == SHT_REL)
                                err = apply_relocations(sechdrs, strtab,
                                                        symindex, i, &mod);
                        else if (sechdrs[i].sh_type == SHT_RELA)
                                err = apply_relocate_add(sechdrs, strtab,
                                                         symindex, i, &mod);
                        if (err < 0)
                                return err;

                }
        } else {
                struct elf_phdr *phdr = (struct elf_phdr *)
                                                ((char *)hdr + hdr->e_phoff);

                for (i = 0; i < hdr->e_phnum; i++) {
                        if (phdr->p_type == PT_LOAD) {
                                memcpy((void *)phdr->p_paddr,
                                       (char *)hdr + phdr->p_offset,
                                       phdr->p_filesz);
                                memset((void *)phdr->p_paddr + phdr->p_filesz,
                                       0, phdr->p_memsz - phdr->p_filesz);
                        }
                        phdr++;
                }

                for (i = 0; i < hdr->e_shnum; i++) {
                        /* Internal symbols and strings. */
                        if (sechdrs[i].sh_type == SHT_SYMTAB) {
                                symindex = i;
                                strindex = sechdrs[i].sh_link;
                                strtab = (char *)hdr +
                                        sechdrs[strindex].sh_offset;

                                /*
                                 * mark symtab's address for when we try
                                 * to find the magic symbols
                                 */
                                sechdrs[i].sh_addr = (size_t) hdr +
                                        sechdrs[i].sh_offset;
                        }
                }
        }

        /* make sure it's physically written out */
        flush_icache_range((unsigned long)v->load_addr,
                           (unsigned long)v->load_addr + v->len);

        if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
                if (v->__start == 0) {
                        pr_warn("VPE loader: program does not contain a __start symbol\n");
                        return -ENOEXEC;
                }

                if (v->shared_ptr == NULL)
                        pr_warn("VPE loader: program does not contain vpe_shared symbol.\n"
                                " Unable to use AMVP (AP/SP) facilities.\n");
        }

        pr_info(" elf loaded\n");
        return 0;
}

static int getcwd(char *buff, int size)
{
        mm_segment_t old_fs;
        int ret;

        old_fs = get_fs();
        set_fs(KERNEL_DS);

        ret = sys_getcwd(buff, size);

        set_fs(old_fs);

        return ret;
}

/* checks VPE is unused and gets ready to load program */
static int vpe_open(struct inode *inode, struct file *filp)
{
        enum vpe_state state;
        struct vpe_notifications *notifier;
        struct vpe *v;
        int ret;

        if (VPE_MODULE_MINOR != iminor(inode)) {
                /* assume only 1 device at the moment. */
                pr_warn("VPE loader: only vpe1 is supported\n");

                return -ENODEV;
        }

        v = get_vpe(aprp_cpu_index());
        if (v == NULL) {
                pr_warn("VPE loader: unable to get vpe\n");

                return -ENODEV;
        }

        state = xchg(&v->state, VPE_STATE_INUSE);
        if (state != VPE_STATE_UNUSED) {
                pr_debug("VPE loader: tc in use dumping regs\n");

                list_for_each_entry(notifier, &v->notify, list)
                        notifier->stop(aprp_cpu_index());

                release_progmem(v->load_addr);
                cleanup_tc(get_tc(aprp_cpu_index()));
        }

        /* this of course trashes what was there before... */
        v->pbuffer = vmalloc(P_SIZE);
        if (!v->pbuffer) {
                pr_warn("VPE loader: unable to allocate memory\n");
                return -ENOMEM;
        }
        v->plen = P_SIZE;
        v->load_addr = NULL;
        v->len = 0;

        v->cwd[0] = 0;
        ret = getcwd(v->cwd, VPE_PATH_MAX);
        if (ret < 0)
                pr_warn("VPE loader: open, getcwd returned %d\n", ret);

        v->shared_ptr = NULL;
        v->__start = 0;

        return 0;
}

static int vpe_release(struct inode *inode, struct file *filp)
{
#if defined(CONFIG_MIPS_VPE_LOADER_MT) || defined(CONFIG_MIPS_VPE_LOADER_CMP)
        struct vpe *v;
        Elf_Ehdr *hdr;
        int ret = 0;

        v = get_vpe(aprp_cpu_index());
        if (v == NULL)
                return -ENODEV;

        hdr = (Elf_Ehdr *) v->pbuffer;
        if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) {
                if (vpe_elfload(v) >= 0) {
                        vpe_run(v);
                } else {
                        pr_warn("VPE loader: ELF load failed.\n");
                        ret = -ENOEXEC;
                }
        } else {
                pr_warn("VPE loader: only elf files are supported\n");
                ret = -ENOEXEC;
        }

        /* It's good to be able to run the SP and if it chokes have a look at
           the /dev/rt?. But if we reset the pointer to the shared struct we
           lose what has happened. So perhaps if garbage is sent to the vpe
           device, use it as a trigger for the reset. Hopefully a nice
           executable will be along shortly. */
        if (ret < 0)
                v->shared_ptr = NULL;

        vfree(v->pbuffer);
        v->plen = 0;

        return ret;
#else
        pr_warn("VPE loader: ELF load failed.\n");
        return -ENOEXEC;
#endif
}

static ssize_t vpe_write(struct file *file, const char __user *buffer,
                         size_t count, loff_t *ppos)
{
        size_t ret = count;
        struct vpe *v;

        if (iminor(file_inode(file)) != VPE_MODULE_MINOR)
                return -ENODEV;

        v = get_vpe(aprp_cpu_index());

        if (v == NULL)
                return -ENODEV;

        if ((count + v->len) > v->plen) {
                pr_warn("VPE loader: elf size too big. Perhaps strip unneeded symbols\n");
                return -ENOMEM;
        }

        count -= copy_from_user(v->pbuffer + v->len, buffer, count);
        if (!count)
                return -EFAULT;

        v->len += count;
        return ret;
}

const struct file_operations vpe_fops = {
        .owner = THIS_MODULE,
        .open = vpe_open,
        .release = vpe_release,
        .write = vpe_write,
        .llseek = noop_llseek,
};

void *vpe_get_shared(int index)
{
        struct vpe *v = get_vpe(index);

        if (v == NULL)
                return NULL;

        return v->shared_ptr;
}
EXPORT_SYMBOL(vpe_get_shared);

int vpe_notify(int index, struct vpe_notifications *notify)
{
        struct vpe *v = get_vpe(index);

        if (v == NULL)
                return -1;

        list_add(&notify->list, &v->notify);
        return 0;
}
EXPORT_SYMBOL(vpe_notify);
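
/*
 * Illustrative consumer sketch, not part of this file (names other than
 * .stop, .list, vpe_notify(), vpe_get_shared() and aprp_cpu_index() are
 * assumptions): a kernel-side AP/SP service would typically register for
 * stop notifications and then fetch the shared-area pointer roughly so:
 *
 *         static void my_sp_stopped(int index) { ... }
 *         static struct vpe_notifications my_notify = { .stop = my_sp_stopped };
 *
 *         vpe_notify(aprp_cpu_index(), &my_notify);
 *         shared = vpe_get_shared(aprp_cpu_index());
 */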

char *vpe_getcwd(int index)
{
        struct vpe *v = get_vpe(index);

        if (v == NULL)
                return NULL;

        return v->cwd;
}
EXPORT_SYMBOL(vpe_getcwd);

module_init(vpe_module_init);
module_exit(vpe_module_exit);
MODULE_DESCRIPTION("MIPS VPE Loader");
MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
MODULE_LICENSE("GPL");