linux/arch/powerpc/kernel/iommu.c
   1/*
   2 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
   3 * 
   4 * Rewrite, cleanup, new allocation schemes, virtual merging: 
   5 * Copyright (C) 2004 Olof Johansson, IBM Corporation
   6 *               and  Ben. Herrenschmidt, IBM Corporation
   7 *
   8 * Dynamic DMA mapping support, bus-independent parts.
   9 *
  10 * This program is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License as published by
  12 * the Free Software Foundation; either version 2 of the License, or
  13 * (at your option) any later version.
  14 * 
  15 * This program is distributed in the hope that it will be useful,
  16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  18 * GNU General Public License for more details.
  19 * 
  20 * You should have received a copy of the GNU General Public License
  21 * along with this program; if not, write to the Free Software
  22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  23 */
  24
  25
  26#include <linux/init.h>
  27#include <linux/types.h>
  28#include <linux/slab.h>
  29#include <linux/mm.h>
  30#include <linux/spinlock.h>
  31#include <linux/string.h>
  32#include <linux/dma-mapping.h>
  33#include <linux/bitmap.h>
  34#include <linux/iommu-helper.h>
  35#include <linux/crash_dump.h>
  36#include <linux/hash.h>
  37#include <linux/fault-inject.h>
  38#include <linux/pci.h>
  39#include <linux/iommu.h>
  40#include <linux/sched.h>
  41#include <asm/io.h>
  42#include <asm/prom.h>
  43#include <asm/iommu.h>
  44#include <asm/pci-bridge.h>
  45#include <asm/machdep.h>
  46#include <asm/kdump.h>
  47#include <asm/fadump.h>
  48#include <asm/vio.h>
  49#include <asm/tce.h>
  50
  51#define DBG(...)
  52
  53static int novmerge;
  54
  55static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
  56
  57static int __init setup_iommu(char *str)
  58{
  59        if (!strcmp(str, "novmerge"))
  60                novmerge = 1;
  61        else if (!strcmp(str, "vmerge"))
  62                novmerge = 0;
  63        return 1;
  64}
  65
  66__setup("iommu=", setup_iommu);
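/*
 * A usage sketch for the option parsed above: appending "iommu=novmerge" to
 * the kernel command line sets novmerge = 1, so ppc_iommu_map_sg() below
 * never coalesces adjacent scatterlist entries into a single DMA segment;
 * "iommu=vmerge" restores the default merging behaviour.
 */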
  67
  68static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
  69
  70/*
  71 * We precalculate the hash to avoid doing it on every allocation.
  72 *
  73 * The hash is important to spread CPUs across all the pools. For example,
   74 * on a POWER7 with 4-way SMT we want interrupts on the primary threads, and
   75 * without the hash all primary threads would map to the same one of 4 pools.
  76 */
  77static int __init setup_iommu_pool_hash(void)
  78{
  79        unsigned int i;
  80
  81        for_each_possible_cpu(i)
  82                per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
  83
  84        return 0;
  85}
  86subsys_initcall(setup_iommu_pool_hash);
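/*
 * A worked example of why the hash matters (figures illustrative): with 4-way
 * SMT the primary threads are CPUs 0, 4, 8, 12, ...  A plain "cpu & (nr_pools - 1)"
 * index with 4 pools would send every primary thread to pool 0, whereas
 * hash_32(cpu, IOMMU_POOL_HASHBITS) scrambles the low bits, so the precomputed
 * iommu_pool_hash values spread those CPUs across all the pools when
 * iommu_range_alloc() picks one.
 */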
  87
  88#ifdef CONFIG_FAIL_IOMMU
  89
  90static DECLARE_FAULT_ATTR(fail_iommu);
  91
  92static int __init setup_fail_iommu(char *str)
  93{
  94        return setup_fault_attr(&fail_iommu, str);
  95}
  96__setup("fail_iommu=", setup_fail_iommu);
  97
  98static bool should_fail_iommu(struct device *dev)
  99{
 100        return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
 101}
 102
 103static int __init fail_iommu_debugfs(void)
 104{
 105        struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
 106                                                       NULL, &fail_iommu);
 107
 108        return PTR_ERR_OR_ZERO(dir);
 109}
 110late_initcall(fail_iommu_debugfs);
 111
 112static ssize_t fail_iommu_show(struct device *dev,
 113                               struct device_attribute *attr, char *buf)
 114{
 115        return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
 116}
 117
 118static ssize_t fail_iommu_store(struct device *dev,
 119                                struct device_attribute *attr, const char *buf,
 120                                size_t count)
 121{
 122        int i;
 123
 124        if (count > 0 && sscanf(buf, "%d", &i) > 0)
 125                dev->archdata.fail_iommu = (i == 0) ? 0 : 1;
 126
 127        return count;
 128}
 129
 130static DEVICE_ATTR(fail_iommu, S_IRUGO|S_IWUSR, fail_iommu_show,
 131                   fail_iommu_store);
 132
 133static int fail_iommu_bus_notify(struct notifier_block *nb,
 134                                 unsigned long action, void *data)
 135{
 136        struct device *dev = data;
 137
 138        if (action == BUS_NOTIFY_ADD_DEVICE) {
 139                if (device_create_file(dev, &dev_attr_fail_iommu))
 140                        pr_warn("Unable to create IOMMU fault injection sysfs "
 141                                "entries\n");
 142        } else if (action == BUS_NOTIFY_DEL_DEVICE) {
 143                device_remove_file(dev, &dev_attr_fail_iommu);
 144        }
 145
 146        return 0;
 147}
 148
 149static struct notifier_block fail_iommu_bus_notifier = {
 150        .notifier_call = fail_iommu_bus_notify
 151};
 152
 153static int __init fail_iommu_setup(void)
 154{
 155#ifdef CONFIG_PCI
 156        bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
 157#endif
 158#ifdef CONFIG_IBMVIO
 159        bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
 160#endif
 161
 162        return 0;
 163}
 164/*
  165 * Must execute after the PCI and VIO subsystems have initialised but before
 166 * devices are probed.
 167 */
 168arch_initcall(fail_iommu_setup);
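/*
 * A rough usage sketch for the fault injection hooks above (the device
 * address is made up; the debugfs knobs are the generic fault-injection
 * attributes described in Documentation/fault-injection/):
 *
 *   # mark one PCI device as a fault-injection candidate
 *   echo 1 > /sys/bus/pci/devices/0000:00:01.0/fail_iommu
 *   # then make (roughly) every second eligible mapping attempt fail
 *   echo 2   > /sys/kernel/debug/fail_iommu/interval
 *   echo 100 > /sys/kernel/debug/fail_iommu/probability
 *   echo -1  > /sys/kernel/debug/fail_iommu/times
 */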
 169#else
 170static inline bool should_fail_iommu(struct device *dev)
 171{
 172        return false;
 173}
 174#endif
 175
 176static unsigned long iommu_range_alloc(struct device *dev,
 177                                       struct iommu_table *tbl,
 178                                       unsigned long npages,
 179                                       unsigned long *handle,
 180                                       unsigned long mask,
 181                                       unsigned int align_order)
 182{ 
 183        unsigned long n, end, start;
 184        unsigned long limit;
 185        int largealloc = npages > 15;
 186        int pass = 0;
 187        unsigned long align_mask;
 188        unsigned long boundary_size;
 189        unsigned long flags;
 190        unsigned int pool_nr;
 191        struct iommu_pool *pool;
 192
 193        align_mask = 0xffffffffffffffffl >> (64 - align_order);
 194
 195        /* This allocator was derived from x86_64's bit string search */
 196
 197        /* Sanity check */
 198        if (unlikely(npages == 0)) {
 199                if (printk_ratelimit())
 200                        WARN_ON(1);
 201                return DMA_ERROR_CODE;
 202        }
 203
 204        if (should_fail_iommu(dev))
 205                return DMA_ERROR_CODE;
 206
 207        /*
 208         * We don't need to disable preemption here because any CPU can
 209         * safely use any IOMMU pool.
 210         */
 211        pool_nr = __this_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);
 212
 213        if (largealloc)
 214                pool = &(tbl->large_pool);
 215        else
 216                pool = &(tbl->pools[pool_nr]);
 217
 218        spin_lock_irqsave(&(pool->lock), flags);
 219
 220again:
 221        if ((pass == 0) && handle && *handle &&
 222            (*handle >= pool->start) && (*handle < pool->end))
 223                start = *handle;
 224        else
 225                start = pool->hint;
 226
 227        limit = pool->end;
 228
 229        /* The case below can happen if we have a small segment appended
  230         * to a large one, or when the previous alloc was at the very end of
 231         * the available space. If so, go back to the initial start.
 232         */
 233        if (start >= limit)
 234                start = pool->start;
 235
 236        if (limit + tbl->it_offset > mask) {
 237                limit = mask - tbl->it_offset + 1;
 238                /* If we're constrained on address range, first try
 239                 * at the masked hint to avoid O(n) search complexity,
 240                 * but on second pass, start at 0 in pool 0.
 241                 */
 242                if ((start & mask) >= limit || pass > 0) {
 243                        spin_unlock(&(pool->lock));
 244                        pool = &(tbl->pools[0]);
 245                        spin_lock(&(pool->lock));
 246                        start = pool->start;
 247                } else {
 248                        start &= mask;
 249                }
 250        }
 251
 252        if (dev)
 253                boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
 254                                      1 << tbl->it_page_shift);
 255        else
 256                boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
 257        /* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
 258
 259        n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
 260                             boundary_size >> tbl->it_page_shift, align_mask);
 261        if (n == -1) {
 262                if (likely(pass == 0)) {
 263                        /* First try the pool from the start */
 264                        pool->hint = pool->start;
 265                        pass++;
 266                        goto again;
 267
 268                } else if (pass <= tbl->nr_pools) {
 269                        /* Now try scanning all the other pools */
 270                        spin_unlock(&(pool->lock));
 271                        pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
 272                        pool = &tbl->pools[pool_nr];
 273                        spin_lock(&(pool->lock));
 274                        pool->hint = pool->start;
 275                        pass++;
 276                        goto again;
 277
 278                } else {
 279                        /* Give up */
 280                        spin_unlock_irqrestore(&(pool->lock), flags);
 281                        return DMA_ERROR_CODE;
 282                }
 283        }
 284
 285        end = n + npages;
 286
 287        /* Bump the hint to a new block for small allocs. */
 288        if (largealloc) {
 289                /* Don't bump to new block to avoid fragmentation */
 290                pool->hint = end;
 291        } else {
 292                /* Overflow will be taken care of at the next allocation */
 293                pool->hint = (end + tbl->it_blocksize - 1) &
 294                                ~(tbl->it_blocksize - 1);
 295        }
 296
 297        /* Update handle for SG allocations */
 298        if (handle)
 299                *handle = end;
 300
 301        spin_unlock_irqrestore(&(pool->lock), flags);
 302
 303        return n;
 304}
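/*
 * A worked example of the parameters above (numbers purely illustrative): a
 * 16-page request has npages > 15 and is therefore served from the large
 * pool; align_order = 4 gives align_mask = 0xf, so the allocation starts at
 * an entry index that is a multiple of 16 IOMMU pages; and a caller limited
 * to 32-bit DMA passes mask = 0xffffffffUL >> tbl->it_page_shift, so "limit"
 * is clamped and no entry is handed out that would map above 4GB on the bus.
 */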
 305
 306static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 307                              void *page, unsigned int npages,
 308                              enum dma_data_direction direction,
 309                              unsigned long mask, unsigned int align_order,
 310                              unsigned long attrs)
 311{
 312        unsigned long entry;
 313        dma_addr_t ret = DMA_ERROR_CODE;
 314        int build_fail;
 315
 316        entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
 317
 318        if (unlikely(entry == DMA_ERROR_CODE))
 319                return DMA_ERROR_CODE;
 320
 321        entry += tbl->it_offset;        /* Offset into real TCE table */
 322        ret = entry << tbl->it_page_shift;      /* Set the return dma address */
 323
 324        /* Put the TCEs in the HW table */
 325        build_fail = tbl->it_ops->set(tbl, entry, npages,
 326                                      (unsigned long)page &
 327                                      IOMMU_PAGE_MASK(tbl), direction, attrs);
 328
 329        /* tbl->it_ops->set() only returns non-zero for transient errors.
 330         * Clean up the table bitmap in this case and return
 331         * DMA_ERROR_CODE. For all other errors the functionality is
 332         * not altered.
 333         */
 334        if (unlikely(build_fail)) {
 335                __iommu_free(tbl, ret, npages);
 336                return DMA_ERROR_CODE;
 337        }
 338
 339        /* Flush/invalidate TLB caches if necessary */
 340        if (tbl->it_ops->flush)
 341                tbl->it_ops->flush(tbl);
 342
 343        /* Make sure updates are seen by hardware */
 344        mb();
 345
 346        return ret;
 347}
 348
 349static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
 350                             unsigned int npages)
 351{
 352        unsigned long entry, free_entry;
 353
 354        entry = dma_addr >> tbl->it_page_shift;
 355        free_entry = entry - tbl->it_offset;
 356
 357        if (((free_entry + npages) > tbl->it_size) ||
 358            (entry < tbl->it_offset)) {
 359                if (printk_ratelimit()) {
 360                        printk(KERN_INFO "iommu_free: invalid entry\n");
 361                        printk(KERN_INFO "\tentry     = 0x%lx\n", entry); 
 362                        printk(KERN_INFO "\tdma_addr  = 0x%llx\n", (u64)dma_addr);
 363                        printk(KERN_INFO "\tTable     = 0x%llx\n", (u64)tbl);
 364                        printk(KERN_INFO "\tbus#      = 0x%llx\n", (u64)tbl->it_busno);
 365                        printk(KERN_INFO "\tsize      = 0x%llx\n", (u64)tbl->it_size);
 366                        printk(KERN_INFO "\tstartOff  = 0x%llx\n", (u64)tbl->it_offset);
 367                        printk(KERN_INFO "\tindex     = 0x%llx\n", (u64)tbl->it_index);
 368                        WARN_ON(1);
 369                }
 370
 371                return false;
 372        }
 373
 374        return true;
 375}
 376
 377static struct iommu_pool *get_pool(struct iommu_table *tbl,
 378                                   unsigned long entry)
 379{
 380        struct iommu_pool *p;
 381        unsigned long largepool_start = tbl->large_pool.start;
 382
 383        /* The large pool is the last pool at the top of the table */
 384        if (entry >= largepool_start) {
 385                p = &tbl->large_pool;
 386        } else {
 387                unsigned int pool_nr = entry / tbl->poolsize;
 388
 389                BUG_ON(pool_nr > tbl->nr_pools);
 390                p = &tbl->pools[pool_nr];
 391        }
 392
 393        return p;
 394}
 395
 396static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 397                         unsigned int npages)
 398{
 399        unsigned long entry, free_entry;
 400        unsigned long flags;
 401        struct iommu_pool *pool;
 402
 403        entry = dma_addr >> tbl->it_page_shift;
 404        free_entry = entry - tbl->it_offset;
 405
 406        pool = get_pool(tbl, free_entry);
 407
 408        if (!iommu_free_check(tbl, dma_addr, npages))
 409                return;
 410
 411        tbl->it_ops->clear(tbl, entry, npages);
 412
 413        spin_lock_irqsave(&(pool->lock), flags);
 414        bitmap_clear(tbl->it_map, free_entry, npages);
 415        spin_unlock_irqrestore(&(pool->lock), flags);
 416}
 417
 418static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 419                unsigned int npages)
 420{
 421        __iommu_free(tbl, dma_addr, npages);
 422
 423        /* Make sure TLB cache is flushed if the HW needs it. We do
 424         * not do an mb() here on purpose, it is not needed on any of
 425         * the current platforms.
 426         */
 427        if (tbl->it_ops->flush)
 428                tbl->it_ops->flush(tbl);
 429}
 430
 431int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 432                     struct scatterlist *sglist, int nelems,
 433                     unsigned long mask, enum dma_data_direction direction,
 434                     unsigned long attrs)
 435{
 436        dma_addr_t dma_next = 0, dma_addr;
 437        struct scatterlist *s, *outs, *segstart;
 438        int outcount, incount, i, build_fail = 0;
 439        unsigned int align;
 440        unsigned long handle;
 441        unsigned int max_seg_size;
 442
 443        BUG_ON(direction == DMA_NONE);
 444
 445        if ((nelems == 0) || !tbl)
 446                return 0;
 447
 448        outs = s = segstart = &sglist[0];
 449        outcount = 1;
 450        incount = nelems;
 451        handle = 0;
 452
 453        /* Init first segment length for backout at failure */
 454        outs->dma_length = 0;
 455
 456        DBG("sg mapping %d elements:\n", nelems);
 457
 458        max_seg_size = dma_get_max_seg_size(dev);
 459        for_each_sg(sglist, s, nelems, i) {
 460                unsigned long vaddr, npages, entry, slen;
 461
 462                slen = s->length;
 463                /* Sanity check */
 464                if (slen == 0) {
 465                        dma_next = 0;
 466                        continue;
 467                }
 468                /* Allocate iommu entries for that segment */
 469                vaddr = (unsigned long) sg_virt(s);
 470                npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
 471                align = 0;
 472                if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
 473                    (vaddr & ~PAGE_MASK) == 0)
 474                        align = PAGE_SHIFT - tbl->it_page_shift;
 475                entry = iommu_range_alloc(dev, tbl, npages, &handle,
 476                                          mask >> tbl->it_page_shift, align);
 477
 478                DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);
 479
 480                /* Handle failure */
 481                if (unlikely(entry == DMA_ERROR_CODE)) {
 482                        if (!(attrs & DMA_ATTR_NO_WARN) &&
 483                            printk_ratelimit())
 484                                dev_info(dev, "iommu_alloc failed, tbl %p "
 485                                         "vaddr %lx npages %lu\n", tbl, vaddr,
 486                                         npages);
 487                        goto failure;
 488                }
 489
 490                /* Convert entry to a dma_addr_t */
 491                entry += tbl->it_offset;
 492                dma_addr = entry << tbl->it_page_shift;
 493                dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));
 494
 495                DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
 496                            npages, entry, dma_addr);
 497
 498                /* Insert into HW table */
 499                build_fail = tbl->it_ops->set(tbl, entry, npages,
 500                                              vaddr & IOMMU_PAGE_MASK(tbl),
 501                                              direction, attrs);
  502                if (unlikely(build_fail))
 503                        goto failure;
 504
 505                /* If we are in an open segment, try merging */
 506                if (segstart != s) {
 507                        DBG("  - trying merge...\n");
 508                        /* We cannot merge if:
 509                         * - allocated dma_addr isn't contiguous to previous allocation
 510                         */
 511                        if (novmerge || (dma_addr != dma_next) ||
 512                            (outs->dma_length + s->length > max_seg_size)) {
 513                                /* Can't merge: create a new segment */
 514                                segstart = s;
 515                                outcount++;
 516                                outs = sg_next(outs);
 517                                DBG("    can't merge, new segment.\n");
 518                        } else {
 519                                outs->dma_length += s->length;
 520                                DBG("    merged, new len: %ux\n", outs->dma_length);
 521                        }
 522                }
 523
 524                if (segstart == s) {
 525                        /* This is a new segment, fill entries */
 526                        DBG("  - filling new segment.\n");
 527                        outs->dma_address = dma_addr;
 528                        outs->dma_length = slen;
 529                }
 530
 531                /* Calculate next page pointer for contiguous check */
 532                dma_next = dma_addr + slen;
 533
 534                DBG("  - dma next is: %lx\n", dma_next);
 535        }
 536
 537        /* Flush/invalidate TLB caches if necessary */
 538        if (tbl->it_ops->flush)
 539                tbl->it_ops->flush(tbl);
 540
 541        DBG("mapped %d elements:\n", outcount);
 542
 543        /* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
 544         * next entry of the sglist if we didn't fill the list completely
 545         */
 546        if (outcount < incount) {
 547                outs = sg_next(outs);
 548                outs->dma_address = DMA_ERROR_CODE;
 549                outs->dma_length = 0;
 550        }
 551
 552        /* Make sure updates are seen by hardware */
 553        mb();
 554
 555        return outcount;
 556
 557 failure:
 558        for_each_sg(sglist, s, nelems, i) {
 559                if (s->dma_length != 0) {
 560                        unsigned long vaddr, npages;
 561
 562                        vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
 563                        npages = iommu_num_pages(s->dma_address, s->dma_length,
 564                                                 IOMMU_PAGE_SIZE(tbl));
 565                        __iommu_free(tbl, vaddr, npages);
 566                        s->dma_address = DMA_ERROR_CODE;
 567                        s->dma_length = 0;
 568                }
 569                if (s == outs)
 570                        break;
 571        }
 572        return 0;
 573}
 574
 575
 576void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 577                        int nelems, enum dma_data_direction direction,
 578                        unsigned long attrs)
 579{
 580        struct scatterlist *sg;
 581
 582        BUG_ON(direction == DMA_NONE);
 583
 584        if (!tbl)
 585                return;
 586
 587        sg = sglist;
 588        while (nelems--) {
 589                unsigned int npages;
 590                dma_addr_t dma_handle = sg->dma_address;
 591
 592                if (sg->dma_length == 0)
 593                        break;
 594                npages = iommu_num_pages(dma_handle, sg->dma_length,
 595                                         IOMMU_PAGE_SIZE(tbl));
 596                __iommu_free(tbl, dma_handle, npages);
 597                sg = sg_next(sg);
 598        }
 599
 600        /* Flush/invalidate TLBs if necessary. As for iommu_free(), we
 601         * do not do an mb() here, the affected platforms do not need it
 602         * when freeing.
 603         */
 604        if (tbl->it_ops->flush)
 605                tbl->it_ops->flush(tbl);
 606}
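/*
 * A minimal sketch of how a dma_map_ops backend is expected to drive the
 * scatterlist helpers above, roughly mirroring the powerpc dma-iommu glue
 * ("my_map_sg" is a made-up name; error handling elided):
 *
 *	static int my_map_sg(struct device *dev, struct scatterlist *sgl,
 *			     int nents, enum dma_data_direction dir,
 *			     unsigned long attrs)
 *	{
 *		return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sgl,
 *					nents, dma_get_mask(dev), dir, attrs);
 *	}
 */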
 607
 608static void iommu_table_clear(struct iommu_table *tbl)
 609{
 610        /*
  611         * With firmware-assisted dump, the system goes through a clean
  612         * reboot at the time of the crash, so it is safe to clear the
  613         * TCE entries when firmware-assisted dump is active.
 614         */
 615        if (!is_kdump_kernel() || is_fadump_active()) {
 616                /* Clear the table in case firmware left allocations in it */
 617                tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
 618                return;
 619        }
 620
 621#ifdef CONFIG_CRASH_DUMP
 622        if (tbl->it_ops->get) {
 623                unsigned long index, tceval, tcecount = 0;
 624
 625                /* Reserve the existing mappings left by the first kernel. */
 626                for (index = 0; index < tbl->it_size; index++) {
 627                        tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
 628                        /*
 629                         * Freed TCE entry contains 0x7fffffffffffffff on JS20
 630                         */
 631                        if (tceval && (tceval != 0x7fffffffffffffffUL)) {
 632                                __set_bit(index, tbl->it_map);
 633                                tcecount++;
 634                        }
 635                }
 636
 637                if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
 638                        printk(KERN_WARNING "TCE table is full; freeing ");
 639                        printk(KERN_WARNING "%d entries for the kdump boot\n",
 640                                KDUMP_MIN_TCE_ENTRIES);
 641                        for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
 642                                index < tbl->it_size; index++)
 643                                __clear_bit(index, tbl->it_map);
 644                }
 645        }
 646#endif
 647}
 648
 649/*
  650 * Build an iommu_table structure.  This contains a bit map which
 651 * is used to manage allocation of the tce space.
 652 */
 653struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 654{
 655        unsigned long sz;
 656        static int welcomed = 0;
 657        struct page *page;
 658        unsigned int i;
 659        struct iommu_pool *p;
 660
 661        BUG_ON(!tbl->it_ops);
 662
 663        /* number of bytes needed for the bitmap */
 664        sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
 665
 666        page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
 667        if (!page)
 668                panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
 669        tbl->it_map = page_address(page);
 670        memset(tbl->it_map, 0, sz);
 671
 672        /*
 673         * Reserve page 0 so it will not be used for any mappings.
  674         * This stops buggy drivers that consider page 0 to be invalid
  675         * from crashing the machine or even losing data.
 676         */
 677        if (tbl->it_offset == 0)
 678                set_bit(0, tbl->it_map);
 679
 680        /* We only split the IOMMU table if we have 1GB or more of space */
 681        if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
 682                tbl->nr_pools = IOMMU_NR_POOLS;
 683        else
 684                tbl->nr_pools = 1;
 685
 686        /* We reserve the top 1/4 of the table for large allocations */
 687        tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;
 688
 689        for (i = 0; i < tbl->nr_pools; i++) {
 690                p = &tbl->pools[i];
 691                spin_lock_init(&(p->lock));
 692                p->start = tbl->poolsize * i;
 693                p->hint = p->start;
 694                p->end = p->start + tbl->poolsize;
 695        }
 696
 697        p = &tbl->large_pool;
 698        spin_lock_init(&(p->lock));
 699        p->start = tbl->poolsize * i;
 700        p->hint = p->start;
 701        p->end = tbl->it_size;
 702
 703        iommu_table_clear(tbl);
 704
 705        if (!welcomed) {
 706                printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
 707                       novmerge ? "disabled" : "enabled");
 708                welcomed = 1;
 709        }
 710
 711        return tbl;
 712}
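/*
 * A worked example of the pool layout set up above (sizes illustrative;
 * IOMMU_NR_POOLS is assumed to be 4 as defined in asm/iommu.h): a 2GB DMA
 * window with 4K IOMMU pages gives it_size = 524288 entries.  That is at
 * least 1GB of space, so nr_pools = 4 and poolsize = (524288 * 3 / 4) / 4 =
 * 98304 entries; pools 0-3 cover entries 0 to 393215 and the large pool owns
 * the remaining top quarter, entries 393216 to 524287.
 */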
 713
 714void iommu_free_table(struct iommu_table *tbl, const char *node_name)
 715{
 716        unsigned long bitmap_sz;
 717        unsigned int order;
 718
 719        if (!tbl)
 720                return;
 721
 722        if (!tbl->it_map) {
 723                kfree(tbl);
 724                return;
 725        }
 726
 727        /*
 728         * In case we have reserved the first bit, we should not emit
 729         * the warning below.
 730         */
 731        if (tbl->it_offset == 0)
 732                clear_bit(0, tbl->it_map);
 733
 734        /* verify that table contains no entries */
 735        if (!bitmap_empty(tbl->it_map, tbl->it_size))
 736                pr_warn("%s: Unexpected TCEs for %s\n", __func__, node_name);
 737
 738        /* calculate bitmap size in bytes */
 739        bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
 740
 741        /* free bitmap */
 742        order = get_order(bitmap_sz);
 743        free_pages((unsigned long) tbl->it_map, order);
 744
 745        /* free table */
 746        kfree(tbl);
 747}
 748
 749/* Creates TCEs for a user provided buffer.  The user buffer must be
 750 * contiguous real kernel storage (not vmalloc).  The address passed here
 751 * comprises a page address and offset into that page. The dma_addr_t
 752 * returned will point to the same byte within the page as was passed in.
 753 */
 754dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
 755                          struct page *page, unsigned long offset, size_t size,
 756                          unsigned long mask, enum dma_data_direction direction,
 757                          unsigned long attrs)
 758{
 759        dma_addr_t dma_handle = DMA_ERROR_CODE;
 760        void *vaddr;
 761        unsigned long uaddr;
 762        unsigned int npages, align;
 763
 764        BUG_ON(direction == DMA_NONE);
 765
 766        vaddr = page_address(page) + offset;
 767        uaddr = (unsigned long)vaddr;
 768        npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
 769
 770        if (tbl) {
 771                align = 0;
 772                if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
 773                    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
 774                        align = PAGE_SHIFT - tbl->it_page_shift;
 775
 776                dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
 777                                         mask >> tbl->it_page_shift, align,
 778                                         attrs);
 779                if (dma_handle == DMA_ERROR_CODE) {
 780                        if (!(attrs & DMA_ATTR_NO_WARN) &&
 781                            printk_ratelimit())  {
 782                                dev_info(dev, "iommu_alloc failed, tbl %p "
 783                                         "vaddr %p npages %d\n", tbl, vaddr,
 784                                         npages);
 785                        }
 786                } else
 787                        dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
 788        }
 789
 790        return dma_handle;
 791}
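/*
 * A worked example of the offset handling above (numbers illustrative):
 * mapping 600 bytes at offset 0x100 into a page, with 4K IOMMU pages, needs
 * a single TCE; the returned handle is (entry << tbl->it_page_shift) | 0x100,
 * so the sub-page offset survives the round trip and iommu_unmap_page()
 * later frees that same single IOMMU page.
 */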
 792
 793void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
 794                      size_t size, enum dma_data_direction direction,
 795                      unsigned long attrs)
 796{
 797        unsigned int npages;
 798
 799        BUG_ON(direction == DMA_NONE);
 800
 801        if (tbl) {
 802                npages = iommu_num_pages(dma_handle, size,
 803                                         IOMMU_PAGE_SIZE(tbl));
 804                iommu_free(tbl, dma_handle, npages);
 805        }
 806}
 807
 808/* Allocates a contiguous real buffer and creates mappings over it.
 809 * Returns the virtual address of the buffer and sets dma_handle
 810 * to the dma address (mapping) of the first page.
 811 */
 812void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 813                           size_t size, dma_addr_t *dma_handle,
 814                           unsigned long mask, gfp_t flag, int node)
 815{
 816        void *ret = NULL;
 817        dma_addr_t mapping;
 818        unsigned int order;
 819        unsigned int nio_pages, io_order;
 820        struct page *page;
 821
 822        size = PAGE_ALIGN(size);
 823        order = get_order(size);
 824
 825        /*
 826         * Client asked for way too much space.  This is checked later
 827         * anyway.  It is easier to debug here for the drivers than in
 828         * the tce tables.
 829         */
 830        if (order >= IOMAP_MAX_ORDER) {
  831                dev_info(dev, "iommu_alloc_coherent size too large: 0x%lx\n",
 832                         size);
 833                return NULL;
 834        }
 835
 836        if (!tbl)
 837                return NULL;
 838
 839        /* Alloc enough pages (and possibly more) */
 840        page = alloc_pages_node(node, flag, order);
 841        if (!page)
 842                return NULL;
 843        ret = page_address(page);
 844        memset(ret, 0, size);
 845
 846        /* Set up tces to cover the allocated range */
 847        nio_pages = size >> tbl->it_page_shift;
 848        io_order = get_iommu_order(size, tbl);
 849        mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
 850                              mask >> tbl->it_page_shift, io_order, 0);
 851        if (mapping == DMA_ERROR_CODE) {
 852                free_pages((unsigned long)ret, order);
 853                return NULL;
 854        }
 855        *dma_handle = mapping;
 856        return ret;
 857}
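/*
 * A worked example of the size arithmetic above (assuming 4K system pages
 * and 4K IOMMU pages, and that get_iommu_order() rounds the size up to the
 * next power-of-two number of IOMMU pages): a 6000-byte request is padded to
 * 8192 bytes, so order = 1 and two contiguous real pages are allocated;
 * nio_pages = 2 and io_order = 1, so the pair of TCEs starts on a
 * 2-IOMMU-page boundary and *dma_handle points at the first of them.
 */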
 858
 859void iommu_free_coherent(struct iommu_table *tbl, size_t size,
 860                         void *vaddr, dma_addr_t dma_handle)
 861{
 862        if (tbl) {
 863                unsigned int nio_pages;
 864
 865                size = PAGE_ALIGN(size);
 866                nio_pages = size >> tbl->it_page_shift;
 867                iommu_free(tbl, dma_handle, nio_pages);
 868                size = PAGE_ALIGN(size);
 869                free_pages((unsigned long)vaddr, get_order(size));
 870        }
 871}
 872
 873unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
 874{
 875        switch (dir) {
 876        case DMA_BIDIRECTIONAL:
 877                return TCE_PCI_READ | TCE_PCI_WRITE;
 878        case DMA_FROM_DEVICE:
 879                return TCE_PCI_WRITE;
 880        case DMA_TO_DEVICE:
 881                return TCE_PCI_READ;
 882        default:
 883                return 0;
 884        }
 885}
 886EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);
 887
 888#ifdef CONFIG_IOMMU_API
 889/*
 890 * SPAPR TCE API
 891 */
 892static void group_release(void *iommu_data)
 893{
 894        struct iommu_table_group *table_group = iommu_data;
 895
 896        table_group->group = NULL;
 897}
 898
 899void iommu_register_group(struct iommu_table_group *table_group,
 900                int pci_domain_number, unsigned long pe_num)
 901{
 902        struct iommu_group *grp;
 903        char *name;
 904
 905        grp = iommu_group_alloc();
 906        if (IS_ERR(grp)) {
 907                pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
 908                                PTR_ERR(grp));
 909                return;
 910        }
 911        table_group->group = grp;
 912        iommu_group_set_iommudata(grp, table_group, group_release);
 913        name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
 914                        pci_domain_number, pe_num);
 915        if (!name)
 916                return;
 917        iommu_group_set_name(grp, name);
 918        kfree(name);
 919}
 920
 921enum dma_data_direction iommu_tce_direction(unsigned long tce)
 922{
 923        if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
 924                return DMA_BIDIRECTIONAL;
 925        else if (tce & TCE_PCI_READ)
 926                return DMA_TO_DEVICE;
 927        else if (tce & TCE_PCI_WRITE)
 928                return DMA_FROM_DEVICE;
 929        else
 930                return DMA_NONE;
 931}
 932EXPORT_SYMBOL_GPL(iommu_tce_direction);
 933
 934void iommu_flush_tce(struct iommu_table *tbl)
 935{
 936        /* Flush/invalidate TLB caches if necessary */
 937        if (tbl->it_ops->flush)
 938                tbl->it_ops->flush(tbl);
 939
 940        /* Make sure updates are seen by hardware */
 941        mb();
 942}
 943EXPORT_SYMBOL_GPL(iommu_flush_tce);
 944
 945int iommu_tce_clear_param_check(struct iommu_table *tbl,
 946                unsigned long ioba, unsigned long tce_value,
 947                unsigned long npages)
 948{
 949        /* tbl->it_ops->clear() does not support any value but 0 */
 950        if (tce_value)
 951                return -EINVAL;
 952
 953        if (ioba & ~IOMMU_PAGE_MASK(tbl))
 954                return -EINVAL;
 955
 956        ioba >>= tbl->it_page_shift;
 957        if (ioba < tbl->it_offset)
 958                return -EINVAL;
 959
 960        if ((ioba + npages) > (tbl->it_offset + tbl->it_size))
 961                return -EINVAL;
 962
 963        return 0;
 964}
 965EXPORT_SYMBOL_GPL(iommu_tce_clear_param_check);
 966
 967int iommu_tce_put_param_check(struct iommu_table *tbl,
 968                unsigned long ioba, unsigned long tce)
 969{
 970        if (tce & ~IOMMU_PAGE_MASK(tbl))
 971                return -EINVAL;
 972
 973        if (ioba & ~IOMMU_PAGE_MASK(tbl))
 974                return -EINVAL;
 975
 976        ioba >>= tbl->it_page_shift;
 977        if (ioba < tbl->it_offset)
 978                return -EINVAL;
 979
 980        if ((ioba + 1) > (tbl->it_offset + tbl->it_size))
 981                return -EINVAL;
 982
 983        return 0;
 984}
 985EXPORT_SYMBOL_GPL(iommu_tce_put_param_check);
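/*
 * A worked example of the window check above (numbers illustrative): with 4K
 * IOMMU pages (it_page_shift = 12), it_offset = 0 and it_size = 0x1000
 * entries, ioba = 0x123400 is rejected for bad alignment, while ioba =
 * 0x1000000 is aligned but rejected because 0x1000000 >> 12 = 0x1000 lies
 * just past the last valid entry, 0xfff.
 */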
 986
 987long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
 988                unsigned long *hpa, enum dma_data_direction *direction)
 989{
 990        long ret;
 991
 992        ret = tbl->it_ops->exchange(tbl, entry, hpa, direction);
 993
 994        if (!ret && ((*direction == DMA_FROM_DEVICE) ||
 995                        (*direction == DMA_BIDIRECTIONAL)))
 996                SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));
 997
 998        /* if (unlikely(ret))
 999                pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n",
1000                        __func__, hwaddr, entry << tbl->it_page_shift,
1001                                hwaddr, ret); */
1002
1003        return ret;
1004}
1005EXPORT_SYMBOL_GPL(iommu_tce_xchg);
1006
1007int iommu_take_ownership(struct iommu_table *tbl)
1008{
1009        unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
1010        int ret = 0;
1011
1012        /*
 1013         * VFIO does not control TCE entry allocation, and the guest
 1014         * can write new TCEs on top of existing ones, so iommu_tce_build()
 1015         * must be able to release old pages. This functionality
 1016         * requires the exchange() callback to be defined, so if it is not
 1017         * implemented, we disallow taking ownership of the table.
1018         */
1019        if (!tbl->it_ops->exchange)
1020                return -EINVAL;
1021
1022        spin_lock_irqsave(&tbl->large_pool.lock, flags);
1023        for (i = 0; i < tbl->nr_pools; i++)
1024                spin_lock(&tbl->pools[i].lock);
1025
1026        if (tbl->it_offset == 0)
1027                clear_bit(0, tbl->it_map);
1028
1029        if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
1030                pr_err("iommu_tce: it_map is not empty");
1031                ret = -EBUSY;
1032                /* Restore bit#0 set by iommu_init_table() */
1033                if (tbl->it_offset == 0)
1034                        set_bit(0, tbl->it_map);
1035        } else {
1036                memset(tbl->it_map, 0xff, sz);
1037        }
1038
1039        for (i = 0; i < tbl->nr_pools; i++)
1040                spin_unlock(&tbl->pools[i].lock);
1041        spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
1042
1043        return ret;
1044}
1045EXPORT_SYMBOL_GPL(iommu_take_ownership);
1046
1047void iommu_release_ownership(struct iommu_table *tbl)
1048{
1049        unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
1050
1051        spin_lock_irqsave(&tbl->large_pool.lock, flags);
1052        for (i = 0; i < tbl->nr_pools; i++)
1053                spin_lock(&tbl->pools[i].lock);
1054
1055        memset(tbl->it_map, 0, sz);
1056
1057        /* Restore bit#0 set by iommu_init_table() */
1058        if (tbl->it_offset == 0)
1059                set_bit(0, tbl->it_map);
1060
1061        for (i = 0; i < tbl->nr_pools; i++)
1062                spin_unlock(&tbl->pools[i].lock);
1063        spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
1064}
1065EXPORT_SYMBOL_GPL(iommu_release_ownership);
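/*
 * A rough sketch of the intended calling sequence for the two ownership
 * helpers above, as used by a VFIO-style driver (error handling elided):
 *
 *	ret = iommu_take_ownership(tbl);
 *	if (ret)
 *		return ret;		// -EBUSY if TCEs are still mapped
 *	// userspace/guest now programs TCEs through iommu_tce_xchg()
 *	iommu_release_ownership(tbl);	// hand the table back to the kernel DMA API
 */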
1066
1067int iommu_add_device(struct device *dev)
1068{
1069        struct iommu_table *tbl;
1070        struct iommu_table_group_link *tgl;
1071
1072        /*
1073         * The sysfs entries should be populated before
 1074         * binding the IOMMU group. If the sysfs entries
 1075         * aren't ready, we simply bail.
1076         */
1077        if (!device_is_registered(dev))
1078                return -ENOENT;
1079
1080        if (dev->iommu_group) {
1081                pr_debug("%s: Skipping device %s with iommu group %d\n",
1082                         __func__, dev_name(dev),
1083                         iommu_group_id(dev->iommu_group));
1084                return -EBUSY;
1085        }
1086
1087        tbl = get_iommu_table_base(dev);
1088        if (!tbl) {
1089                pr_debug("%s: Skipping device %s with no tbl\n",
1090                         __func__, dev_name(dev));
1091                return 0;
1092        }
1093
1094        tgl = list_first_entry_or_null(&tbl->it_group_list,
1095                        struct iommu_table_group_link, next);
1096        if (!tgl) {
1097                pr_debug("%s: Skipping device %s with no group\n",
1098                         __func__, dev_name(dev));
1099                return 0;
1100        }
1101        pr_debug("%s: Adding %s to iommu group %d\n",
1102                 __func__, dev_name(dev),
1103                 iommu_group_id(tgl->table_group->group));
1104
1105        if (PAGE_SIZE < IOMMU_PAGE_SIZE(tbl)) {
1106                pr_err("%s: Invalid IOMMU page size %lx (%lx) on %s\n",
1107                       __func__, IOMMU_PAGE_SIZE(tbl),
1108                       PAGE_SIZE, dev_name(dev));
1109                return -EINVAL;
1110        }
1111
1112        return iommu_group_add_device(tgl->table_group->group, dev);
1113}
1114EXPORT_SYMBOL_GPL(iommu_add_device);
1115
1116void iommu_del_device(struct device *dev)
1117{
1118        /*
 1119         * Some devices might not have an IOMMU table and group,
 1120         * so we need not detach them from the associated
 1121         * IOMMU groups.
1122         */
1123        if (!dev->iommu_group) {
1124                pr_debug("iommu_tce: skipping device %s with no tbl\n",
1125                         dev_name(dev));
1126                return;
1127        }
1128
1129        iommu_group_remove_device(dev);
1130}
1131EXPORT_SYMBOL_GPL(iommu_del_device);
1132
1133static int tce_iommu_bus_notifier(struct notifier_block *nb,
1134                unsigned long action, void *data)
1135{
1136        struct device *dev = data;
1137
1138        switch (action) {
1139        case BUS_NOTIFY_ADD_DEVICE:
1140                return iommu_add_device(dev);
1141        case BUS_NOTIFY_DEL_DEVICE:
1142                if (dev->iommu_group)
1143                        iommu_del_device(dev);
1144                return 0;
1145        default:
1146                return 0;
1147        }
1148}
1149
1150static struct notifier_block tce_iommu_bus_nb = {
1151        .notifier_call = tce_iommu_bus_notifier,
1152};
1153
1154int __init tce_iommu_bus_notifier_init(void)
1155{
1156        bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
1157        return 0;
1158}
1159#endif /* CONFIG_IOMMU_API */
1160