linux/arch/powerpc/kernel/iommu.c
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>

#define DBG(...)

#ifdef CONFIG_IOMMU_VMERGE
static int novmerge = 0;
#else
static int novmerge = 1;
#endif

static int protect4gb = 1;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_protect4gb(char *str)
{
        if (strcmp(str, "on") == 0)
                protect4gb = 1;
        else if (strcmp(str, "off") == 0)
                protect4gb = 0;

        return 1;
}

static int __init setup_iommu(char *str)
{
        if (!strcmp(str, "novmerge"))
                novmerge = 1;
        else if (!strcmp(str, "vmerge"))
                novmerge = 0;
        return 1;
}

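/*
 * Example kernel command line usage (a sketch; the precise effect of
 * protect4gb depends on the platform code that consumes the flag):
 *
 *     iommu=novmerge     disable virtual merging of scatterlist entries
 *     iommu=vmerge       enable virtual merging
 *     protect4gb=off     do not reserve entries to keep DMA allocations
 *                        from crossing a 4GB boundary
 */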
__setup("protect4gb=", setup_protect4gb);
__setup("iommu=", setup_iommu);

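/*
 * Allocate a contiguous run of @npages entries from @tbl's bitmap,
 * honouring the device's DMA @mask and the requested @align_order.
 * Small allocations (15 pages or less) are confined to the lower part
 * of the table (up to it_halfpoint); large ones start at it_halfpoint,
 * which limits fragmentation. Returns the index of the first allocated
 * entry, or DMA_ERROR_CODE on failure. Caller must hold tbl->it_lock.
 */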
static unsigned long iommu_range_alloc(struct device *dev,
                                       struct iommu_table *tbl,
                                       unsigned long npages,
                                       unsigned long *handle,
                                       unsigned long mask,
                                       unsigned int align_order)
{
        unsigned long n, end, start;
        unsigned long limit;
        int largealloc = npages > 15;
        int pass = 0;
        unsigned long align_mask;
        unsigned long boundary_size;

        /* (1 << align_order) - 1, written so that align_order == 0 does
         * not produce an undefined 64-bit shift */
        align_mask = (1ull << align_order) - 1;

        /* This allocator was derived from x86_64's bit string search */

        /* Sanity check */
        if (unlikely(npages == 0)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return DMA_ERROR_CODE;
        }

        if (handle && *handle)
                start = *handle;
        else
                start = largealloc ? tbl->it_largehint : tbl->it_hint;

        /* Use only the lower part of the table, up to it_halfpoint, for
         * small allocs (15 pages or less) */
        limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

        if (largealloc && start < tbl->it_halfpoint)
                start = tbl->it_halfpoint;

        /* The case below can happen if we have a small segment appended
         * to a large, or when the previous alloc was at the very end of
         * the available space. If so, go back to the initial start.
         */
        if (start >= limit)
                start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

        if (limit + tbl->it_offset > mask) {
                limit = mask - tbl->it_offset + 1;
                /* If we're constrained on address range, first try
                 * at the masked hint to avoid O(n) search complexity,
                 * but on second pass, start at 0.
                 */
                if ((start & mask) >= limit || pass > 0)
                        start = 0;
                else
                        start &= mask;
        }

        if (dev)
                boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                      1 << IOMMU_PAGE_SHIFT);
        else
                /* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
                boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);

        n = iommu_area_alloc(tbl->it_map, limit, start, npages,
                             tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
                             align_mask);
        if (n == -1) {
                if (likely(pass < 2)) {
                        /* First failure, just rescan the half of the table.
                         * Second failure, rescan the other half of the table.
                         */
                        start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
                        limit = pass ? tbl->it_size : limit;
                        pass++;
                        goto again;
                } else {
                        /* Third failure, give up */
                        return DMA_ERROR_CODE;
                }
        }

        end = n + npages;

        /* Bump the hint to a new block for small allocs. */
        if (largealloc) {
                /* Don't bump to new block to avoid fragmentation */
                tbl->it_largehint = end;
        } else {
                /* Overflow will be taken care of at the next allocation */
                tbl->it_hint = (end + tbl->it_blocksize - 1) &
                                ~(tbl->it_blocksize - 1);
        }

        /* Update handle for SG allocations */
        if (handle)
                *handle = end;

        return n;
}

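/*
 * Allocate a range of table entries for @page/@npages, program the
 * hardware TCEs through ppc_md.tce_build() and return the resulting
 * DMA (bus) address, or DMA_ERROR_CODE on failure. Takes tbl->it_lock.
 */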
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
                              void *page, unsigned int npages,
                              enum dma_data_direction direction,
                              unsigned long mask, unsigned int align_order,
                              struct dma_attrs *attrs)
{
        unsigned long entry, flags;
        dma_addr_t ret = DMA_ERROR_CODE;
        int build_fail;

        spin_lock_irqsave(&(tbl->it_lock), flags);

        entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

        if (unlikely(entry == DMA_ERROR_CODE)) {
                spin_unlock_irqrestore(&(tbl->it_lock), flags);
                return DMA_ERROR_CODE;
        }

        entry += tbl->it_offset;        /* Offset into real TCE table */
        ret = entry << IOMMU_PAGE_SHIFT;        /* Set the return dma address */

        /* Put the TCEs in the HW table */
        build_fail = ppc_md.tce_build(tbl, entry, npages,
                                      (unsigned long)page & IOMMU_PAGE_MASK,
                                      direction, attrs);

        /* ppc_md.tce_build() only returns non-zero for transient errors.
         * Clean up the table bitmap in this case and return
         * DMA_ERROR_CODE. For all other errors the functionality is
         * not altered.
         */
        if (unlikely(build_fail)) {
                __iommu_free(tbl, ret, npages);

                spin_unlock_irqrestore(&(tbl->it_lock), flags);
                return DMA_ERROR_CODE;
        }

        /* Flush/invalidate TLB caches if necessary */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);

        /* Make sure updates are seen by hardware */
        mb();

        return ret;
}

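/*
 * Free @npages table entries starting at @dma_addr, after validating
 * that the range lies inside the table. Lock-free worker: the caller
 * must hold tbl->it_lock and flush the TCE TLB as needed.
 */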
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                         unsigned int npages)
{
        unsigned long entry, free_entry;

        entry = dma_addr >> IOMMU_PAGE_SHIFT;
        free_entry = entry - tbl->it_offset;

        if (((free_entry + npages) > tbl->it_size) ||
            (entry < tbl->it_offset)) {
                if (printk_ratelimit()) {
                        printk(KERN_INFO "iommu_free: invalid entry\n");
                        printk(KERN_INFO "\tentry     = 0x%lx\n", entry);
                        printk(KERN_INFO "\tdma_addr  = 0x%llx\n", (u64)dma_addr);
                        printk(KERN_INFO "\tTable     = 0x%llx\n", (u64)tbl);
                        printk(KERN_INFO "\tbus#      = 0x%llx\n", (u64)tbl->it_busno);
                        printk(KERN_INFO "\tsize      = 0x%llx\n", (u64)tbl->it_size);
                        printk(KERN_INFO "\tstartOff  = 0x%llx\n", (u64)tbl->it_offset);
                        printk(KERN_INFO "\tindex     = 0x%llx\n", (u64)tbl->it_index);
                        WARN_ON(1);
                }
                return;
        }

        ppc_md.tce_free(tbl, entry, npages);
        iommu_area_free(tbl->it_map, free_entry, npages);
}

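/*
 * Locked wrapper around __iommu_free() that also flushes the TCE TLB
 * when the platform requires it.
 */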
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                unsigned int npages)
{
        unsigned long flags;

        spin_lock_irqsave(&(tbl->it_lock), flags);

        __iommu_free(tbl, dma_addr, npages);

        /* Make sure TLB cache is flushed if the HW needs it. We do
         * not do an mb() here on purpose, it is not needed on any of
         * the current platforms.
         */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

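/*
 * Map a scatterlist of @nelems entries. Adjacent entries whose IOMMU
 * allocations happen to be contiguous are merged into one DMA segment,
 * unless "iommu=novmerge" was given or the merged length would exceed
 * the device's maximum segment size. Returns the number of DMA
 * segments produced, or 0 on failure (with partial mappings undone).
 */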
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                 struct scatterlist *sglist, int nelems,
                 unsigned long mask, enum dma_data_direction direction,
                 struct dma_attrs *attrs)
{
        dma_addr_t dma_next = 0, dma_addr;
        unsigned long flags;
        struct scatterlist *s, *outs, *segstart;
        int outcount, incount, i, build_fail = 0;
        unsigned int align;
        unsigned long handle;
        unsigned int max_seg_size;

        BUG_ON(direction == DMA_NONE);

        if ((nelems == 0) || !tbl)
                return 0;

        outs = s = segstart = &sglist[0];
        outcount = 1;
        incount = nelems;
        handle = 0;

        /* Init first segment length for backout at failure */
        outs->dma_length = 0;

        DBG("sg mapping %d elements:\n", nelems);

        spin_lock_irqsave(&(tbl->it_lock), flags);

        max_seg_size = dma_get_max_seg_size(dev);
        for_each_sg(sglist, s, nelems, i) {
                unsigned long vaddr, npages, entry, slen;

                slen = s->length;
                /* Sanity check */
                if (slen == 0) {
                        dma_next = 0;
                        continue;
                }
                /* Allocate iommu entries for that segment */
                vaddr = (unsigned long) sg_virt(s);
                npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
                align = 0;
                if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
                    (vaddr & ~PAGE_MASK) == 0)
                        align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
                entry = iommu_range_alloc(dev, tbl, npages, &handle,
                                          mask >> IOMMU_PAGE_SHIFT, align);

                DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

                /* Handle failure */
                if (unlikely(entry == DMA_ERROR_CODE)) {
                        if (printk_ratelimit())
                                printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
                                       " npages %lx\n", tbl, vaddr, npages);
                        goto failure;
                }

                /* Convert entry to a dma_addr_t */
                entry += tbl->it_offset;
                dma_addr = entry << IOMMU_PAGE_SHIFT;
                dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

                DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
                            npages, entry, dma_addr);

                /* Insert into HW table */
                build_fail = ppc_md.tce_build(tbl, entry, npages,
                                              vaddr & IOMMU_PAGE_MASK,
                                              direction, attrs);
                if (unlikely(build_fail))
                        goto failure;

                /* If we are in an open segment, try merging */
                if (segstart != s) {
                        DBG("  - trying merge...\n");
                        /* We cannot merge if:
                         * - merging is disabled ("iommu=novmerge")
                         * - the allocated dma_addr isn't contiguous with
                         *   the previous allocation
                         * - the merged segment would exceed the device's
                         *   max segment size
                         */
                        if (novmerge || (dma_addr != dma_next) ||
                            (outs->dma_length + s->length > max_seg_size)) {
                                /* Can't merge: create a new segment */
                                segstart = s;
                                outcount++;
                                outs = sg_next(outs);
                                DBG("    can't merge, new segment.\n");
                        } else {
                                outs->dma_length += s->length;
                                DBG("    merged, new len: %u\n", outs->dma_length);
                        }
                }

                if (segstart == s) {
                        /* This is a new segment, fill entries */
                        DBG("  - filling new segment.\n");
                        outs->dma_address = dma_addr;
                        outs->dma_length = slen;
                }

                /* Calculate next page pointer for contiguous check */
                dma_next = dma_addr + slen;

                DBG("  - dma next is: %lx\n", dma_next);
        }

        /* Flush/invalidate TLB caches if necessary */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);

        DBG("mapped %d elements:\n", outcount);

        /* For the sake of iommu_unmap_sg, we clear out the length in the
         * next entry of the sglist if we didn't fill the list completely
         */
        if (outcount < incount) {
                outs = sg_next(outs);
                outs->dma_address = DMA_ERROR_CODE;
                outs->dma_length = 0;
        }

        /* Make sure updates are seen by hardware */
        mb();

        return outcount;

 failure:
        for_each_sg(sglist, s, nelems, i) {
                if (s->dma_length != 0) {
                        unsigned long vaddr, npages;

                        vaddr = s->dma_address & IOMMU_PAGE_MASK;
                        npages = iommu_num_pages(s->dma_address, s->dma_length,
                                                 IOMMU_PAGE_SIZE);
                        __iommu_free(tbl, vaddr, npages);
                        s->dma_address = DMA_ERROR_CODE;
                        s->dma_length = 0;
                }
                if (s == outs)
                        break;
        }
        spin_unlock_irqrestore(&(tbl->it_lock), flags);
        return 0;
}

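/*
 * Undo iommu_map_sg(): walk the list up to the first zero-length entry
 * (the terminator left by iommu_map_sg) and release each segment's
 * table entries. Takes tbl->it_lock.
 */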
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                int nelems, enum dma_data_direction direction,
                struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        unsigned long flags;

        BUG_ON(direction == DMA_NONE);

        if (!tbl)
                return;

        spin_lock_irqsave(&(tbl->it_lock), flags);

        sg = sglist;
        while (nelems--) {
                unsigned int npages;
                dma_addr_t dma_handle = sg->dma_address;

                if (sg->dma_length == 0)
                        break;
                npages = iommu_num_pages(dma_handle, sg->dma_length,
                                         IOMMU_PAGE_SIZE);
                __iommu_free(tbl, dma_handle, npages);
                sg = sg_next(sg);
        }

        /* Flush/invalidate TLBs if necessary. As for iommu_free(), we
         * do not do an mb() here, the affected platforms do not need it
         * when freeing.
         */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

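/*
 * Prepare a new table's hardware state. On a normal boot all entries
 * are simply freed; in a kdump kernel the first kernel's live mappings
 * are preserved instead (marked used in the bitmap) so in-flight DMA
 * is not disturbed, while keeping at least KDUMP_MIN_TCE_ENTRIES free
 * for the kdump boot itself.
 */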
static void iommu_table_clear(struct iommu_table *tbl)
{
        if (!is_kdump_kernel()) {
                /* Clear the table in case firmware left allocations in it */
                ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
                return;
        }

#ifdef CONFIG_CRASH_DUMP
        if (ppc_md.tce_get) {
                unsigned long index, tceval, tcecount = 0;

                /* Reserve the existing mappings left by the first kernel. */
                for (index = 0; index < tbl->it_size; index++) {
                        tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
                        /*
                         * Freed TCE entry contains 0x7fffffffffffffff on JS20
                         */
                        if (tceval && (tceval != 0x7fffffffffffffffUL)) {
                                __set_bit(index, tbl->it_map);
                                tcecount++;
                        }
                }

                if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
                        printk(KERN_WARNING "TCE table is full; freeing "
                               "%d entries for the kdump boot\n",
                               KDUMP_MIN_TCE_ENTRIES);
                        for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
                                index < tbl->it_size; index++)
                                __clear_bit(index, tbl->it_map);
                }
        }
#endif
}

/*
 * Build an iommu_table structure.  This contains a bitmap which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
        unsigned long sz;
        static int welcomed = 0;
        struct page *page;

        /* Set aside 1/4 of the table for large allocations. */
        tbl->it_halfpoint = tbl->it_size * 3 / 4;

        /* number of bytes needed for the bitmap */
        sz = (tbl->it_size + 7) >> 3;

        page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
        if (!page)
                panic("iommu_init_table: Can't allocate %lu bytes\n", sz);
        tbl->it_map = page_address(page);
        memset(tbl->it_map, 0, sz);

        tbl->it_hint = 0;
        tbl->it_largehint = tbl->it_halfpoint;
        spin_lock_init(&tbl->it_lock);

        iommu_table_clear(tbl);

        if (!welcomed) {
                printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
                       novmerge ? "disabled" : "enabled");
                welcomed = 1;
        }

        return tbl;
}

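/*
 * Tear down a table built by iommu_init_table(): warn if any TCEs are
 * still mapped, then release the allocation bitmap and the iommu_table
 * structure itself.
 */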
void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
        unsigned long bitmap_sz, i;
        unsigned int order;

        if (!tbl || !tbl->it_map) {
                printk(KERN_ERR "%s: expected TCE map for %s\n", __func__,
                                node_name);
                return;
        }

        /* verify that table contains no entries */
        /* it_size is in entries, and we're examining 64 at a time */
        for (i = 0; i < (tbl->it_size/64); i++) {
                if (tbl->it_map[i] != 0) {
                        printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
                                __func__, node_name);
                        break;
                }
        }

        /* calculate bitmap size in bytes */
        bitmap_sz = (tbl->it_size + 7) / 8;

        /* free bitmap */
        order = get_order(bitmap_sz);
        free_pages((unsigned long) tbl->it_map, order);

        /* free table */
        kfree(tbl);
}

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
                          struct page *page, unsigned long offset, size_t size,
                          unsigned long mask, enum dma_data_direction direction,
                          struct dma_attrs *attrs)
{
        dma_addr_t dma_handle = DMA_ERROR_CODE;
        void *vaddr;
        unsigned long uaddr;
        unsigned int npages, align;

        BUG_ON(direction == DMA_NONE);

        vaddr = page_address(page) + offset;
        uaddr = (unsigned long)vaddr;
        npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);

        if (tbl) {
                align = 0;
                if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
                    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
                        align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

                dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
                                         mask >> IOMMU_PAGE_SHIFT, align,
                                         attrs);
                if (dma_handle == DMA_ERROR_CODE) {
                        if (printk_ratelimit())  {
                                printk(KERN_INFO "iommu_alloc failed, "
                                                "tbl %p vaddr %p npages %d\n",
                                                tbl, vaddr, npages);
                        }
                } else
                        dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
        }

        return dma_handle;
}

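/*
 * Inverse of iommu_map_page(): free the table entries covering
 * @dma_handle .. @dma_handle + @size.
 */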
void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
                      size_t size, enum dma_data_direction direction,
                      struct dma_attrs *attrs)
{
        unsigned int npages;

        BUG_ON(direction == DMA_NONE);

        if (tbl) {
                npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
                iommu_free(tbl, dma_handle, npages);
        }
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
                           size_t size, dma_addr_t *dma_handle,
                           unsigned long mask, gfp_t flag, int node)
{
        void *ret = NULL;
        dma_addr_t mapping;
        unsigned int order;
        unsigned int nio_pages, io_order;
        struct page *page;

        size = PAGE_ALIGN(size);
        order = get_order(size);

        /*
         * Client asked for way too much space.  This is checked later
         * anyway.  It is easier to debug here for the drivers than in
         * the tce tables.
         */
        if (order >= IOMAP_MAX_ORDER) {
                printk(KERN_ERR "iommu_alloc_coherent: size too large: 0x%lx\n",
                       size);
                return NULL;
        }

        if (!tbl)
                return NULL;

        /* Alloc enough pages (and possibly more) */
        page = alloc_pages_node(node, flag, order);
        if (!page)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);

        /* Set up tces to cover the allocated range */
        nio_pages = size >> IOMMU_PAGE_SHIFT;
        io_order = get_iommu_order(size);
        mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
                              mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
        if (mapping == DMA_ERROR_CODE) {
                free_pages((unsigned long)ret, order);
                return NULL;
        }
        *dma_handle = mapping;
        return ret;
}

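/*
 * Free a buffer obtained from iommu_alloc_coherent(): tear down its
 * TCE mappings, then return the pages to the page allocator.
 */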
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
                         void *vaddr, dma_addr_t dma_handle)
{
        if (tbl) {
                unsigned int nio_pages;

                size = PAGE_ALIGN(size);
                nio_pages = size >> IOMMU_PAGE_SHIFT;
                iommu_free(tbl, dma_handle, nio_pages);
                free_pages((unsigned long)vaddr, get_order(size));
        }
}