linux/drivers/ieee1394/dma.c
/*
 * DMA region bookkeeping routines
 *
 * Copyright (C) 2002 Maas Digital LLC
 *
 * This code is licensed under the GPL.  See the file COPYING in the root
 * directory of the kernel sources for details.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/scatterlist.h>

#include "dma.h"

/* dma_prog_region */

void dma_prog_region_init(struct dma_prog_region *prog)
{
        prog->kvirt = NULL;
        prog->dev = NULL;
        prog->n_pages = 0;
        prog->bus_addr = 0;
}

int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
                          struct pci_dev *dev)
{
        /* round up to page size */
        n_bytes = PAGE_ALIGN(n_bytes);

        prog->n_pages = n_bytes >> PAGE_SHIFT;

        prog->kvirt = pci_alloc_consistent(dev, n_bytes, &prog->bus_addr);
        if (!prog->kvirt) {
                printk(KERN_ERR
                       "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
                dma_prog_region_free(prog);
                return -ENOMEM;
        }

        prog->dev = dev;

        return 0;
}

void dma_prog_region_free(struct dma_prog_region *prog)
{
        if (prog->kvirt) {
                pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT,
                                    prog->kvirt, prog->bus_addr);
        }

        prog->kvirt = NULL;
        prog->dev = NULL;
        prog->n_pages = 0;
        prog->bus_addr = 0;
}
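
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * expected lifecycle of a dma_prog_region.  The caller and its pci_dev
 * are hypothetical; error handling is kept to the bare minimum.
 */
#if 0
static int example_prog_lifecycle(struct pci_dev *pdev)
{
        struct dma_prog_region prog;
        int ret;

        dma_prog_region_init(&prog);
        ret = dma_prog_region_alloc(&prog, PAGE_SIZE, pdev);
        if (ret)
                return ret;

        /* write the DMA program into prog.kvirt, then hand
         * prog.bus_addr to the controller */

        dma_prog_region_free(&prog);
        return 0;
}
#endif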

/* dma_region */

/**
 * dma_region_init - clear out all fields but do not allocate anything
 */
void dma_region_init(struct dma_region *dma)
{
        dma->kvirt = NULL;
        dma->dev = NULL;
        dma->n_pages = 0;
        dma->n_dma_pages = 0;
        dma->sglist = NULL;
}

/**
 * dma_region_alloc - allocate the buffer and map it to the IOMMU
 */
int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
                     struct pci_dev *dev, int direction)
{
        unsigned int i;

        /* round up to page size */
        n_bytes = PAGE_ALIGN(n_bytes);

        dma->n_pages = n_bytes >> PAGE_SHIFT;

        dma->kvirt = vmalloc_32(n_bytes);
        if (!dma->kvirt) {
                printk(KERN_ERR "dma_region_alloc: vmalloc_32() failed\n");
                goto err;
        }

        /* clear the RAM so no kernel junk leaks to userspace */
        memset(dma->kvirt, 0, n_bytes);

        /* allocate scatter/gather list */
        dma->sglist = vmalloc(dma->n_pages * sizeof(*dma->sglist));
        if (!dma->sglist) {
                printk(KERN_ERR "dma_region_alloc: vmalloc(sglist) failed\n");
                goto err;
        }

        sg_init_table(dma->sglist, dma->n_pages);

        /* fill scatter/gather list with pages */
        for (i = 0; i < dma->n_pages; i++) {
                unsigned long va =
                    (unsigned long)dma->kvirt + (i << PAGE_SHIFT);

                sg_set_page(&dma->sglist[i], vmalloc_to_page((void *)va),
                                PAGE_SIZE, 0);
        }

        /* map sglist to the IOMMU */
        dma->n_dma_pages =
            pci_map_sg(dev, dma->sglist, dma->n_pages, direction);

        if (dma->n_dma_pages == 0) {
                printk(KERN_ERR "dma_region_alloc: pci_map_sg() failed\n");
                goto err;
        }

        dma->dev = dev;
        dma->direction = direction;

        return 0;

err:
        dma_region_free(dma);
        return -ENOMEM;
}

/**
 * dma_region_free - unmap and free the buffer
 */
void dma_region_free(struct dma_region *dma)
{
        if (dma->n_dma_pages) {
                pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages,
                             dma->direction);
                dma->n_dma_pages = 0;
                dma->dev = NULL;
        }

        vfree(dma->sglist);
        dma->sglist = NULL;

        vfree(dma->kvirt);
        dma->kvirt = NULL;
        dma->n_pages = 0;
}
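
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * expected lifecycle of a dma_region.  The caller, buffer size and DMA
 * direction are hypothetical.
 */
#if 0
static int example_region_lifecycle(struct pci_dev *pdev)
{
        struct dma_region dma;
        int ret;

        dma_region_init(&dma);
        ret = dma_region_alloc(&dma, 256 * 1024, pdev, PCI_DMA_FROMDEVICE);
        if (ret)
                return ret;

        /* point the device at the buffer via dma_region_offset_to_bus(),
         * bracketing CPU accesses with the sync routines defined below */

        dma_region_free(&dma);
        return 0;
}
#endif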

/* find the scatterlist index and remaining offset corresponding to a
   given offset from the beginning of the buffer */
static inline int dma_region_find(struct dma_region *dma, unsigned long offset,
                                  unsigned int start, unsigned long *rem)
{
        int i;
        unsigned long off = offset;

        for (i = start; i < dma->n_dma_pages; i++) {
                if (off < sg_dma_len(&dma->sglist[i])) {
                        *rem = off;
                        break;
                }

                off -= sg_dma_len(&dma->sglist[i]);
        }

        BUG_ON(i >= dma->n_dma_pages);

        return i;
}
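
/*
 * Worked example (illustrative): if the IOMMU coalesced the mapping into
 * two entries with sg_dma_len() of 8192 and 4096 bytes, then
 * dma_region_find(dma, 9000, 0, &rem) skips the first entry
 * (9000 >= 8192), returns index 1, and sets rem = 9000 - 8192 = 808.
 */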

/**
 * dma_region_offset_to_bus - get bus address of an offset within a DMA region
 *
 * Returns the DMA bus address of the byte with the given @offset relative to
 * the beginning of @dma.
 */
dma_addr_t dma_region_offset_to_bus(struct dma_region *dma,
                                    unsigned long offset)
{
        unsigned long rem = 0;

        struct scatterlist *sg =
            &dma->sglist[dma_region_find(dma, offset, 0, &rem)];
        return sg_dma_address(sg) + rem;
}
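
/*
 * Usage sketch (illustrative only): the returned bus address is what a
 * driver would write into a DMA descriptor so the device can reach a
 * byte inside the region.  The descriptor layout, its data_address
 * field and packet_offset are all hypothetical.
 */
#if 0
        dma_addr_t bus = dma_region_offset_to_bus(&dma, packet_offset);

        descriptor->data_address = cpu_to_le32((u32)bus);
#endif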

/**
 * dma_region_sync_for_cpu - sync the CPU's view of the buffer
 */
void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
                             unsigned long len)
{
        int first, last;
        unsigned long rem = 0;

        if (!len)
                len = 1;

        first = dma_region_find(dma, offset, 0, &rem);
        last = dma_region_find(dma, rem + len - 1, first, &rem);

        pci_dma_sync_sg_for_cpu(dma->dev, &dma->sglist[first], last - first + 1,
                                dma->direction);
}

/**
 * dma_region_sync_for_device - sync the IO bus' view of the buffer
 */
void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset,
                                unsigned long len)
{
        int first, last;
        unsigned long rem = 0;

        if (!len)
                len = 1;

        first = dma_region_find(dma, offset, 0, &rem);
        last = dma_region_find(dma, rem + len - 1, first, &rem);

        pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first],
                                   last - first + 1, dma->direction);
}
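
/*
 * Usage sketch (illustrative only): the usual streaming-DMA discipline
 * around a range the device writes into.  The offset and length here
 * are hypothetical.
 */
#if 0
        /* the device has DMA'd a packet into [off, off + len) */
        dma_region_sync_for_cpu(&dma, off, len);

        /* ... the CPU may now read or modify those bytes ... */

        /* hand the range back to the device before it DMAs again */
        dma_region_sync_for_device(&dma, off, len);
#endif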

#ifdef CONFIG_MMU

/* nopage() handler for mmap access */

static struct page *dma_region_pagefault(struct vm_area_struct *area,
                                         unsigned long address, int *type)
{
        unsigned long offset;
        unsigned long kernel_virt_addr;
        struct page *ret = NOPAGE_SIGBUS;

        struct dma_region *dma = (struct dma_region *)area->vm_private_data;

        if (!dma->kvirt)
                goto out;

        if ((address < (unsigned long)area->vm_start) ||
            (address >=
             (unsigned long)area->vm_start + (dma->n_pages << PAGE_SHIFT)))
                goto out;

        if (type)
                *type = VM_FAULT_MINOR;
        offset = address - area->vm_start;
        kernel_virt_addr = (unsigned long)dma->kvirt + offset;
        ret = vmalloc_to_page((void *)kernel_virt_addr);
        get_page(ret);
out:
        return ret;
}

static struct vm_operations_struct dma_region_vm_ops = {
        .nopage = dma_region_pagefault,
};

/**
 * dma_region_mmap - map the buffer into a user space process
 */
int dma_region_mmap(struct dma_region *dma, struct file *file,
                    struct vm_area_struct *vma)
{
        unsigned long size;

        if (!dma->kvirt)
                return -EINVAL;

        /* the mapping must start at the beginning of the region
         * (vm_pgoff counts whole pages) */
        if (vma->vm_pgoff != 0)
                return -EINVAL;

        /* check the length */
        size = vma->vm_end - vma->vm_start;
        if (size > (dma->n_pages << PAGE_SHIFT))
                return -EINVAL;

        vma->vm_ops = &dma_region_vm_ops;
        vma->vm_private_data = dma;
        vma->vm_file = file;
        vma->vm_flags |= VM_RESERVED;

        return 0;
}
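
/*
 * Usage sketch (illustrative only): a driver's mmap file operation can
 * simply delegate to dma_region_mmap().  struct example_state and the
 * file_operations wiring around it are hypothetical.
 */
#if 0
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct example_state *state = file->private_data;

        return dma_region_mmap(&state->dma, file, vma);
}
#endif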

#else                           /* CONFIG_MMU */

int dma_region_mmap(struct dma_region *dma, struct file *file,
                    struct vm_area_struct *vma)
{
        return -EINVAL;
}

#endif                          /* CONFIG_MMU */