linux/arch/powerpc/mm/dma-noncoherent.c
/*
 *  PowerPC version derived from arch/arm/mm/consistent.c
 *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 *  Copyright (C) 2000 Russell King
 *
 * Consistent memory allocators.  Used for DMA devices that want to
 * share uncached memory with the processor core.  The function return
 * is the virtual address and 'dma_handle' is the physical address.
 * Mostly stolen from the ARM port, with some changes for PowerPC.
 *                                              -- Dan
 *
 * Reorganized to get rid of the arch-specific consistent_* functions
 * and provide non-coherent implementations for the DMA API. -Matt
 *
 * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
 * implementation. This is pulled straight from ARM and barely
 * modified. -Matt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>

#include <asm/tlbflush.h>
#include <asm/dma.h>

#include "mmu_decl.h"

/*
 * This address range defaults to a value that is safe for all
 * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
 * can be further configured for specific applications under
 * the "Advanced Setup" menu. -Matt
 */
#define CONSISTENT_BASE         (IOREMAP_TOP)
#define CONSISTENT_END          (CONSISTENT_BASE + CONFIG_CONSISTENT_SIZE)
#define CONSISTENT_OFFSET(x)    (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
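
/*
 * Worked example (illustration only): for any address inside
 * [CONSISTENT_BASE, CONSISTENT_END), CONSISTENT_OFFSET() yields the
 * zero-based page index within the consistent window, e.g.
 * CONSISTENT_OFFSET(CONSISTENT_BASE + 3 * PAGE_SIZE) evaluates to 3.
 */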

/*
 * This spinlock protects the uncached, DMA consistent mapping range
 * (2MB by default) and the list of regions allocated from it.
 */
static DEFINE_SPINLOCK(consistent_lock);

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region  region;
 *    unsigned long     flags;
 *    struct page       **pages;
 *    unsigned int      nr_pages;
 *    unsigned long     phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *      .vm_list        = LIST_HEAD_INIT(vmalloc_head.vm_list),
 *      .vm_start       = VMALLOC_START,
 *      .vm_end         = VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct ppc_vm_region {
        struct list_head        vm_list;
        unsigned long           vm_start;
        unsigned long           vm_end;
};

static struct ppc_vm_region consistent_head = {
        .vm_list        = LIST_HEAD_INIT(consistent_head.vm_list),
        .vm_start       = CONSISTENT_BASE,
        .vm_end         = CONSISTENT_END,
};
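
/*
 * Purely illustrative sketch of the "generic" reuse envisaged above (not
 * used anywhere in this file): a second allocator would describe its own
 * window with another head, e.g. (hypothetical names and bounds)
 *
 *  static struct ppc_vm_region example_io_head = {
 *      .vm_list        = LIST_HEAD_INIT(example_io_head.vm_list),
 *      .vm_start       = EXAMPLE_IO_BASE,
 *      .vm_end         = EXAMPLE_IO_END,
 *  };
 *
 * Note that the helpers below still serialize on the single
 * consistent_lock, so a truly generic version would also need a
 * per-head lock.
 */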
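/*
 * Carve a 'size'-byte hole out of the virtual window described by 'head':
 * walk the address-sorted region list under consistent_lock, take the
 * first gap large enough (first fit) and insert a new ppc_vm_region
 * describing it.  Returns NULL if the window is exhausted or the
 * kmalloc() fails.
 */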
static struct ppc_vm_region *
ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp)
{
        unsigned long addr = head->vm_start, end = head->vm_end - size;
        unsigned long flags;
        struct ppc_vm_region *c, *new;

        new = kmalloc(sizeof(struct ppc_vm_region), gfp);
        if (!new)
                goto out;

        spin_lock_irqsave(&consistent_lock, flags);

        list_for_each_entry(c, &head->vm_list, vm_list) {
                if ((addr + size) < addr)
                        goto nospc;
                if ((addr + size) <= c->vm_start)
                        goto found;
                addr = c->vm_end;
                if (addr > end)
                        goto nospc;
        }

 found:
        /*
         * Insert this entry _before_ the one we found.
         */
        list_add_tail(&new->vm_list, &c->vm_list);
        new->vm_start = addr;
        new->vm_end = addr + size;

        spin_unlock_irqrestore(&consistent_lock, flags);
        return new;

 nospc:
        spin_unlock_irqrestore(&consistent_lock, flags);
        kfree(new);
 out:
        return NULL;
}

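/*
 * Return the region previously allocated at exactly 'addr', or NULL if
 * there is none.  The caller is expected to hold consistent_lock.
 */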
static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr)
{
        struct ppc_vm_region *c;

        list_for_each_entry(c, &head->vm_list, vm_list) {
                if (c->vm_start == addr)
                        goto out;
        }
        c = NULL;
 out:
        return c;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
__dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
        struct page *page;
        struct ppc_vm_region *c;
        unsigned long order;
        u64 mask = ISA_DMA_THRESHOLD, limit;

        if (dev) {
                mask = dev->coherent_dma_mask;

                /*
                 * Sanity check the DMA mask - it must be non-zero, and
                 * must be able to be satisfied by a DMA allocation.
                 */
                if (mask == 0) {
                        dev_warn(dev, "coherent DMA mask is unset\n");
                        goto no_page;
                }

                if ((~mask) & ISA_DMA_THRESHOLD) {
                        dev_warn(dev, "coherent DMA mask %#llx is smaller "
                                 "than system GFP_DMA mask %#llx\n",
                                 mask, (unsigned long long)ISA_DMA_THRESHOLD);
                        goto no_page;
                }
        }


        size = PAGE_ALIGN(size);
        limit = (mask + 1) & ~mask;
        if ((limit && size >= limit) ||
            size >= (CONSISTENT_END - CONSISTENT_BASE)) {
                printk(KERN_WARNING "coherent allocation too big (requested %#zx mask %#Lx)\n",
                       size, mask);
                return NULL;
        }

        order = get_order(size);

        /* Might be useful if we ever have a real legacy DMA zone... */
        if (mask != 0xffffffff)
                gfp |= GFP_DMA;

        page = alloc_pages(gfp, order);
        if (!page)
                goto no_page;

        /*
         * Invalidate any data that might be lurking in the
         * kernel direct-mapped region for device DMA.
         */
        {
                unsigned long kaddr = (unsigned long)page_address(page);
                memset(page_address(page), 0, size);
                flush_dcache_range(kaddr, kaddr + size);
        }

        /*
         * Allocate a virtual address in the consistent mapping region.
         */
        c = ppc_vm_region_alloc(&consistent_head, size,
                            gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
        if (c) {
                unsigned long vaddr = c->vm_start;
                struct page *end = page + (1 << order);

                split_page(page, order);

                /*
                 * Set the "dma handle"
                 */
                *handle = page_to_phys(page);

                do {
                        SetPageReserved(page);
                        map_kernel_page(vaddr, page_to_phys(page),
                                 pgprot_val(pgprot_noncached(PAGE_KERNEL)));
                        page++;
                        vaddr += PAGE_SIZE;
                } while (size -= PAGE_SIZE);

                /*
                 * Free the otherwise unused pages.
                 */
                while (page < end) {
                        __free_page(page);
                        page++;
                }

                return (void *)c->vm_start;
        }

        if (page)
                __free_pages(page, order);
 no_page:
        return NULL;
}
EXPORT_SYMBOL(__dma_alloc_coherent);
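
/*
 * Illustrative use only (hypothetical driver, not part of this file): a
 * driver on a non-cache-coherent platform would typically do something like
 *
 *	dma_addr_t bus;
 *	void *ring = __dma_alloc_coherent(dev, RING_BYTES, &bus, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	writel(bus, regs + RING_BASE_REG);	// device uses the bus address
 *	...
 *	__dma_free_coherent(RING_BYTES, ring);	// see __dma_free_coherent()
 *
 * where RING_BYTES, regs and RING_BASE_REG are made-up names.  In practice
 * drivers call dma_alloc_coherent()/dma_free_coherent(), which end up here
 * on CONFIG_NOT_COHERENT_CACHE platforms.
 */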

/*
 * free a page as defined by the above mapping.
 */
void __dma_free_coherent(size_t size, void *vaddr)
{
        struct ppc_vm_region *c;
        unsigned long flags, addr;

        size = PAGE_ALIGN(size);

        spin_lock_irqsave(&consistent_lock, flags);

        c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr);
        if (!c)
                goto no_area;

        if ((c->vm_end - c->vm_start) != size) {
                printk(KERN_ERR "%s: freeing wrong coherent size (%lu != %zu)\n",
                       __func__, c->vm_end - c->vm_start, size);
                dump_stack();
                size = c->vm_end - c->vm_start;
        }

        addr = c->vm_start;
        do {
                pte_t *ptep;
                unsigned long pfn;

                ptep = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(addr),
                                                               addr),
                                                    addr),
                                         addr);
                if (!pte_none(*ptep) && pte_present(*ptep)) {
                        pfn = pte_pfn(*ptep);
                        pte_clear(&init_mm, addr, ptep);
                        if (pfn_valid(pfn)) {
                                struct page *page = pfn_to_page(pfn);
                                __free_reserved_page(page);
                        }
                }
                addr += PAGE_SIZE;
        } while (size -= PAGE_SIZE);

        flush_tlb_kernel_range(c->vm_start, c->vm_end);

        list_del(&c->vm_list);

        spin_unlock_irqrestore(&consistent_lock, flags);

        kfree(c);
        return;

 no_area:
        spin_unlock_irqrestore(&consistent_lock, flags);
        printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
               __func__, vaddr);
        dump_stack();
}
EXPORT_SYMBOL(__dma_free_coherent);

/*
 * make an area consistent.
 */
void __dma_sync(void *vaddr, size_t size, int direction)
{
        unsigned long start = (unsigned long)vaddr;
        unsigned long end   = start + size;

        switch (direction) {
        case DMA_NONE:
                BUG();
        case DMA_FROM_DEVICE:
                /*
                 * invalidate only when cache-line aligned otherwise there is
                 * the potential for discarding uncommitted data from the cache
                 */
                if ((start | end) & (L1_CACHE_BYTES - 1))
                        flush_dcache_range(start, end);
                else
                        invalidate_dcache_range(start, end);
                break;
        case DMA_TO_DEVICE:             /* writeback only */
                clean_dcache_range(start, end);
                break;
        case DMA_BIDIRECTIONAL: /* writeback and invalidate */
                flush_dcache_range(start, end);
                break;
        }
}
EXPORT_SYMBOL(__dma_sync);
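
/*
 * Illustrative use only (hypothetical buffer and device, not part of this
 * file): for streaming DMA the buffer stays cacheable, so the cache must be
 * cleaned before the device reads it and invalidated before the CPU reads
 * what the device wrote, e.g.
 *
 *	__dma_sync(buf, len, DMA_TO_DEVICE);	// writeback before device read
 *	start_device_tx(buf_bus, len);		// made-up device call
 *	...
 *	wait_for_device_rx();			// made-up device call
 *	__dma_sync(buf, len, DMA_FROM_DEVICE);	// invalidate before CPU read
 *
 * The dma_map_single()/dma_sync_single_for_*() helpers normally issue these
 * calls on behalf of drivers.
 */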

#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 */
static inline void __dma_sync_page_highmem(struct page *page,
                unsigned long offset, size_t size, int direction)
{
        size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
        size_t cur_size = seg_size;
        unsigned long flags, start, seg_offset = offset;
        int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
        int seg_nr = 0;

        local_irq_save(flags);

        do {
                start = (unsigned long)kmap_atomic(page + seg_nr) + seg_offset;

                /* Sync this buffer segment */
                __dma_sync((void *)start, seg_size, direction);
                kunmap_atomic((void *)start);
                seg_nr++;

                /* Calculate next buffer segment size */
                seg_size = min((size_t)PAGE_SIZE, size - cur_size);

                /* Add the segment size to our running total */
                cur_size += seg_size;
                seg_offset = 0;
        } while (seg_nr < nr_segs);

        local_irq_restore(flags);
}
#endif /* CONFIG_HIGHMEM */

/*
 * __dma_sync_page makes memory consistent: identical to __dma_sync(), but
 * it takes a struct page and offset instead of a virtual address.
 */
void __dma_sync_page(struct page *page, unsigned long offset,
        size_t size, int direction)
{
#ifdef CONFIG_HIGHMEM
        __dma_sync_page_highmem(page, offset, size, direction);
#else
        unsigned long start = (unsigned long)page_address(page) + offset;
        __dma_sync((void *)start, size, direction);
#endif
}
EXPORT_SYMBOL(__dma_sync_page);
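
/*
 * Illustrative use only: the streaming DMA map/unmap paths pass a page and
 * offset rather than a kernel virtual address, e.g. (hypothetical values)
 *
 *	__dma_sync_page(page, offset, len, DMA_TO_DEVICE);
 *
 * which works even when 'page' is a highmem page with no permanent kernel
 * mapping.
 */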

/*
 * Return the PFN for a given cpu virtual address returned by
 * __dma_alloc_coherent. This is used by dma_mmap_coherent()
 */
unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr)
{
        /* This should always be populated, so we don't test every
         * level. If that fails, we'll have a nice crash which
         * will be as good as a BUG_ON()
         */
        pgd_t *pgd = pgd_offset_k(cpu_addr);
        pud_t *pud = pud_offset(pgd, cpu_addr);
        pmd_t *pmd = pmd_offset(pud, cpu_addr);
        pte_t *ptep = pte_offset_kernel(pmd, cpu_addr);

        if (pte_none(*ptep) || !pte_present(*ptep))
                return 0;
        return pte_pfn(*ptep);
}
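
/*
 * Illustrative sketch only (simplified, error handling omitted): an mmap
 * implementation for a coherent buffer can use the returned pfn roughly as
 *
 *	unsigned long pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
 *			vma->vm_end - vma->vm_start, vma->vm_page_prot);
 *
 * which is essentially what the powerpc dma_mmap_coherent() path does.
 */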