linux/arch/sh/kernel/io_trapped.c
/*
 * Trapped io support
 *
 * Copyright (C) 2008 Magnus Damm
 *
 * Intercept io operations by trapping.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/init.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/io_trapped.h>

#define TRAPPED_PAGES_MAX 16

#ifdef CONFIG_HAS_IOPORT
LIST_HEAD(trapped_io);
EXPORT_SYMBOL_GPL(trapped_io);
#endif
#ifdef CONFIG_HAS_IOMEM
LIST_HEAD(trapped_mem);
EXPORT_SYMBOL_GPL(trapped_mem);
#endif
static DEFINE_SPINLOCK(trapped_lock);

static int trapped_io_disable __read_mostly;

static int __init trapped_io_setup(char *__unused)
{
        trapped_io_disable = 1;
        return 1;
}
__setup("noiotrap", trapped_io_setup);

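/*
 * Usage note: booting with "noiotrap" on the kernel command line sets
 * trapped_io_disable, which makes register_trapped_io() below return
 * early so no I/O windows are intercepted.
 */
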
int register_trapped_io(struct trapped_io *tiop)
{
        struct resource *res;
        unsigned long len = 0, flags = 0;
        struct page *pages[TRAPPED_PAGES_MAX];
        int k, n;

        if (unlikely(trapped_io_disable))
                return 0;

        /* structure must be page aligned */
        if ((unsigned long)tiop & (PAGE_SIZE - 1))
                goto bad;

        for (k = 0; k < tiop->num_resources; k++) {
                res = tiop->resource + k;
                len += roundup((res->end - res->start) + 1, PAGE_SIZE);
                flags |= res->flags;
        }

        /* support IORESOURCE_IO _or_ MEM, not both */
        if (hweight_long(flags) != 1)
                goto bad;

        n = len >> PAGE_SHIFT;

        if (n >= TRAPPED_PAGES_MAX)
                goto bad;

        for (k = 0; k < n; k++)
                pages[k] = virt_to_page(tiop);

        tiop->virt_base = vmap(pages, n, VM_MAP, PAGE_NONE);
        if (!tiop->virt_base)
                goto bad;

        len = 0;
        for (k = 0; k < tiop->num_resources; k++) {
                res = tiop->resource + k;
                pr_info("trapped io 0x%08lx overrides %s 0x%08lx\n",
                       (unsigned long)(tiop->virt_base + len),
                       res->flags & IORESOURCE_IO ? "io" : "mmio",
                       (unsigned long)res->start);
                len += roundup((res->end - res->start) + 1, PAGE_SIZE);
        }

        tiop->magic = IO_TRAPPED_MAGIC;
        INIT_LIST_HEAD(&tiop->list);
        spin_lock_irq(&trapped_lock);
        if (flags & IORESOURCE_IO)
                list_add(&tiop->list, &trapped_io);
        if (flags & IORESOURCE_MEM)
                list_add(&tiop->list, &trapped_mem);
        spin_unlock_irq(&trapped_lock);

        return 0;
 bad:
        pr_warning("unable to install trapped io filter\n");
        return -1;
}
EXPORT_SYMBOL_GPL(register_trapped_io);

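/*
 * Example (illustrative sketch, not part of this file): a board file
 * describes the device window with ordinary resources and hands a page
 * aligned trapped_io structure to register_trapped_io().  The addresses,
 * size and the "foo" names below are made up; only the field names, the
 * single-resource-type rule and the page alignment requirement come from
 * the code above.
 *
 *	static struct resource foo_trapped_resources[] = {
 *		{
 *			.start	= 0xb0000000,
 *			.end	= 0xb0000000 + 0x1000 - 1,
 *			.flags	= IORESOURCE_MEM,
 *		},
 *	};
 *
 *	static struct trapped_io foo_trapped __aligned(PAGE_SIZE) = {
 *		.resource		= foo_trapped_resources,
 *		.num_resources		= ARRAY_SIZE(foo_trapped_resources),
 *		.minimum_bus_width	= 16,	// device only takes 16-bit cycles
 *	};
 *
 *	static int __init foo_trapped_init(void)
 *	{
 *		return register_trapped_io(&foo_trapped);
 *	}
 */
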
void __iomem *match_trapped_io_handler(struct list_head *list,
                                       unsigned long offset,
                                       unsigned long size)
{
        unsigned long voffs;
        struct trapped_io *tiop;
        struct resource *res;
        int k, len;
        unsigned long flags;

        spin_lock_irqsave(&trapped_lock, flags);
        list_for_each_entry(tiop, list, list) {
                voffs = 0;
                for (k = 0; k < tiop->num_resources; k++) {
                        res = tiop->resource + k;
                        if (res->start == offset) {
                                spin_unlock_irqrestore(&trapped_lock, flags);
                                return tiop->virt_base + voffs;
                        }

                        len = (res->end - res->start) + 1;
                        voffs += roundup(len, PAGE_SIZE);
                }
        }
        spin_unlock_irqrestore(&trapped_lock, flags);
        return NULL;
}
EXPORT_SYMBOL_GPL(match_trapped_io_handler);

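/*
 * Example (illustrative only): the arch ioport/ioremap glue can ask whether
 * a port range or physical address is covered by a trapped window before it
 * sets up a real mapping, roughly along these lines:
 *
 *	void __iomem *addr;
 *
 *	addr = match_trapped_io_handler(&trapped_io, port, size);
 *	if (addr)
 *		return addr;	// hand back the trapping virtual window
 *	// ...otherwise fall back to the normal mapping...
 *
 * The actual call sites live in the SH I/O mapping glue, not in this file.
 */
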
/*
 * register_trapped_io() maps the (page aligned) trapped_io structure itself
 * at virt_base with PAGE_NONE, so walking the kernel page table at the
 * faulting address yields the pfn of that structure; pfn_to_kaddr() turns
 * the pfn back into the struct trapped_io pointer.
 */
static struct trapped_io *lookup_tiop(unsigned long address)
{
        pgd_t *pgd_k;
        pud_t *pud_k;
        pmd_t *pmd_k;
        pte_t *pte_k;
        pte_t entry;

        pgd_k = swapper_pg_dir + pgd_index(address);
        if (!pgd_present(*pgd_k))
                return NULL;

        pud_k = pud_offset(pgd_k, address);
        if (!pud_present(*pud_k))
                return NULL;

        pmd_k = pmd_offset(pud_k, address);
        if (!pmd_present(*pmd_k))
                return NULL;

        pte_k = pte_offset_kernel(pmd_k, address);
        entry = *pte_k;

        return pfn_to_kaddr(pte_pfn(entry));
}

/*
 * Translate an address inside the trapped window back into the address in
 * the underlying resource, walking the resources in the same page-rounded
 * order that register_trapped_io() used to lay them out.
 */
static unsigned long lookup_address(struct trapped_io *tiop,
                                    unsigned long address)
{
        struct resource *res;
        unsigned long vaddr = (unsigned long)tiop->virt_base;
        unsigned long len;
        int k;

        for (k = 0; k < tiop->num_resources; k++) {
                res = tiop->resource + k;
                len = roundup((res->end - res->start) + 1, PAGE_SIZE);
                if (address < (vaddr + len))
                        return res->start + (address - vaddr);
                vaddr += len;
        }
        return 0;
}

static unsigned long long copy_word(unsigned long src_addr, int src_len,
                                    unsigned long dst_addr, int dst_len)
{
        unsigned long long tmp = 0;

        switch (src_len) {
        case 1:
                tmp = ctrl_inb(src_addr);
                break;
        case 2:
                tmp = ctrl_inw(src_addr);
                break;
        case 4:
                tmp = ctrl_inl(src_addr);
                break;
        case 8:
                tmp = ctrl_inq(src_addr);
                break;
        }

        switch (dst_len) {
        case 1:
                ctrl_outb(tmp, dst_addr);
                break;
        case 2:
                ctrl_outw(tmp, dst_addr);
                break;
        case 4:
                ctrl_outl(tmp, dst_addr);
                break;
        case 8:
                ctrl_outq(tmp, dst_addr);
                break;
        }

        return tmp;
}

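/*
 * Worked example (illustrative): with tiop->minimum_bus_width = 16, an
 * 8-bit read of a trapped register ends up as a 16-bit ctrl_inw() on the
 * device side (src_len = max(1, 16 / 8) = 2), and only the low byte is
 * then stored into the destination used by the unaligned-access fixup
 * (dst_len = 1).  Writes are widened the same way in to_device() below.
 */
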
static unsigned long from_device(void *dst, const void *src, unsigned long cnt)
{
        struct trapped_io *tiop;
        unsigned long src_addr = (unsigned long)src;
        unsigned long long tmp;

        pr_debug("trapped io read 0x%08lx (%ld)\n", src_addr, cnt);
        tiop = lookup_tiop(src_addr);
        WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));

        src_addr = lookup_address(tiop, src_addr);
        if (!src_addr)
                return cnt;

        tmp = copy_word(src_addr,
                        max_t(unsigned long, cnt,
                              (tiop->minimum_bus_width / 8)),
                        (unsigned long)dst, cnt);

        pr_debug("trapped io read 0x%08lx -> 0x%08llx\n", src_addr, tmp);
        return 0;
}

static unsigned long to_device(void *dst, const void *src, unsigned long cnt)
{
        struct trapped_io *tiop;
        unsigned long dst_addr = (unsigned long)dst;
        unsigned long long tmp;

        pr_debug("trapped io write 0x%08lx (%ld)\n", dst_addr, cnt);
        tiop = lookup_tiop(dst_addr);
        WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));

        dst_addr = lookup_address(tiop, dst_addr);
        if (!dst_addr)
                return cnt;

        tmp = copy_word((unsigned long)src, cnt,
                        dst_addr, max_t(unsigned long, cnt,
                                        (tiop->minimum_bus_width / 8)));

        pr_debug("trapped io write 0x%08lx -> 0x%08llx\n", dst_addr, tmp);
        return 0;
}

static struct mem_access trapped_io_access = {
        from_device,
        to_device,
};

int handle_trapped_io(struct pt_regs *regs, unsigned long address)
{
        mm_segment_t oldfs;
        insn_size_t instruction;
        int tmp;

        if (!lookup_tiop(address))
                return 0;

        WARN_ON(user_mode(regs));

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        if (copy_from_user(&instruction, (void *)(regs->pc),
                           sizeof(instruction))) {
                set_fs(oldfs);
                return 0;
        }

        tmp = handle_unaligned_access(instruction, regs,
                                      &trapped_io_access, 1);
        set_fs(oldfs);
        return tmp == 0;
}

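/*
 * Example (illustrative only): since the trapped window is mapped with
 * PAGE_NONE, touching it faults, and handle_trapped_io() is meant to be
 * called from the fault handling path once the faulting address is known,
 * roughly like this (the surrounding handler is sketched, not quoted):
 *
 *	if (handle_trapped_io(regs, address))
 *		return;		// access was emulated, resume the kernel
 *	// ...otherwise continue with normal fault handling...
 */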