linux/arch/sparc/kernel/iommu.c
   1/* iommu.c: Generic sparc64 IOMMU support.
   2 *
   3 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
   4 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
   5 */
   6
   7#include <linux/kernel.h>
   8#include <linux/module.h>
   9#include <linux/slab.h>
  10#include <linux/delay.h>
  11#include <linux/device.h>
  12#include <linux/dma-mapping.h>
  13#include <linux/errno.h>
  14#include <linux/iommu-helper.h>
  15#include <linux/bitmap.h>
  16
  17#ifdef CONFIG_PCI
  18#include <linux/pci.h>
  19#endif
  20
  21#include <asm/iommu.h>
  22
  23#include "iommu_common.h"
  24
  25#define STC_CTXMATCH_ADDR(STC, CTX)     \
  26        ((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
  27#define STC_FLUSHFLAG_INIT(STC) \
  28        (*((STC)->strbuf_flushflag) = 0UL)
  29#define STC_FLUSHFLAG_SET(STC) \
  30        (*((STC)->strbuf_flushflag) != 0UL)
  31
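/* Raw accessors for IOMMU control registers.  These go through the
 * physical-bypass ASI, so no MMU translation is involved; the "memory"
 * clobber on the read keeps the compiler from reordering it around the
 * register writes it is used to synchronize (see the write_complete_reg
 * reads below).
 */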
  32#define iommu_read(__reg) \
  33({      u64 __ret; \
  34        __asm__ __volatile__("ldxa [%1] %2, %0" \
  35                             : "=r" (__ret) \
  36                             : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
  37                             : "memory"); \
  38        __ret; \
  39})
  40#define iommu_write(__reg, __val) \
  41        __asm__ __volatile__("stxa %0, [%1] %2" \
  42                             : /* no outputs */ \
  43                             : "r" (__val), "r" (__reg), \
  44                               "i" (ASI_PHYS_BYPASS_EC_E))
  45
  46/* Must be invoked under the IOMMU lock. */
  47static void iommu_flushall(struct iommu *iommu)
  48{
  49        if (iommu->iommu_flushinv) {
  50                iommu_write(iommu->iommu_flushinv, ~(u64)0);
  51        } else {
  52                unsigned long tag;
  53                int entry;
  54
  55                tag = iommu->iommu_tags;
  56                for (entry = 0; entry < 16; entry++) {
  57                        iommu_write(tag, 0);
  58                        tag += 8;
  59                }
  60
  61                /* Ensure completion of previous PIO writes. */
  62                (void) iommu_read(iommu->write_complete_reg);
  63        }
  64}
  65
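/* IOPTE protection templates.  Consistent mappings are valid, cacheable
 * translations with the DMA context packed into the upper bits of the
 * IOPTE; streaming mappings additionally set IOPTE_STBUF so the transfer
 * goes through the streaming buffer and must be flushed before the CPU
 * looks at the data.
 */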
  66#define IOPTE_CONSISTENT(CTX) \
  67        (IOPTE_VALID | IOPTE_CACHE | \
  68         (((CTX) << 47) & IOPTE_CONTEXT))
  69
  70#define IOPTE_STREAMING(CTX) \
  71        (IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
  72
  73/* Existing mappings are never marked invalid, instead they
  74 * are pointed to a dummy page.
  75 */
  76#define IOPTE_IS_DUMMY(iommu, iopte)    \
  77        ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
  78
  79static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
  80{
  81        unsigned long val = iopte_val(*iopte);
  82
  83        val &= ~IOPTE_PAGE;
  84        val |= iommu->dummy_page_pa;
  85
  86        iopte_val(*iopte) = val;
  87}
  88
  89/* Based almost entirely upon the ppc64 iommu allocator.  If you use the 'handle'
  90 * facility it must all be done in one pass while under the iommu lock.
  91 *
  92 * On sun4u platforms, we only flush the IOMMU once every time we've passed
  93 * over the entire page table doing allocations.  Therefore we only ever advance
  94 * the hint and cannot backtrack it.
  95 */
  96unsigned long iommu_range_alloc(struct device *dev,
  97                                struct iommu *iommu,
  98                                unsigned long npages,
  99                                unsigned long *handle)
 100{
 101        unsigned long n, end, start, limit, boundary_size;
 102        struct iommu_arena *arena = &iommu->arena;
 103        int pass = 0;
 104
 105        /* This allocator was derived from x86_64's bit string search */
 106
 107        /* Sanity check */
 108        if (unlikely(npages == 0)) {
 109                if (printk_ratelimit())
 110                        WARN_ON(1);
 111                return DMA_ERROR_CODE;
 112        }
 113
 114        if (handle && *handle)
 115                start = *handle;
 116        else
 117                start = arena->hint;
 118
 119        limit = arena->limit;
 120
 121        /* The case below can happen if we have a small segment appended
 122         * to a large, or when the previous alloc was at the very end of
 123         * the available space. If so, go back to the beginning and flush.
 124         */
 125        if (start >= limit) {
 126                start = 0;
 127                if (iommu->flush_all)
 128                        iommu->flush_all(iommu);
 129        }
 130
 131 again:
 132
 133        if (dev)
 134                boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
 135                                      1 << IO_PAGE_SHIFT);
 136        else
 137                boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);
 138
 139        n = iommu_area_alloc(arena->map, limit, start, npages,
 140                             iommu->page_table_map_base >> IO_PAGE_SHIFT,
 141                             boundary_size >> IO_PAGE_SHIFT, 0);
 142        if (n == -1) {
 143                if (likely(pass < 1)) {
 144                        /* First failure, rescan from the beginning.  */
 145                        start = 0;
 146                        if (iommu->flush_all)
 147                                iommu->flush_all(iommu);
 148                        pass++;
 149                        goto again;
 150                } else {
 151                        /* Second failure, give up */
 152                        return DMA_ERROR_CODE;
 153                }
 154        }
 155
 156        end = n + npages;
 157
 158        arena->hint = end;
 159
 160        /* Update handle for SG allocations */
 161        if (handle)
 162                *handle = end;
 163
 164        return n;
 165}
 166
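/* Return a previously allocated range of IOPTE slots to the arena
 * bitmap.  Callers hold iommu->lock and are responsible for flushing
 * the streaming buffer and pointing the IOPTEs back at the dummy page
 * themselves.
 */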
 167void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
 168{
 169        struct iommu_arena *arena = &iommu->arena;
 170        unsigned long entry;
 171
 172        entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
 173
 174        bitmap_clear(arena->map, entry, npages);
 175}
 176
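/* One-time setup of the software IOMMU state for a controller: the
 * allocation bitmap sized to match the TSB, the dummy page that unused
 * IOPTEs point at, and the IOMMU page table (TSB) itself, every entry
 * of which initially points at the dummy page.  Returns 0 on success or
 * -ENOMEM if any allocation fails.
 */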
 177int iommu_table_init(struct iommu *iommu, int tsbsize,
 178                     u32 dma_offset, u32 dma_addr_mask,
 179                     int numa_node)
 180{
 181        unsigned long i, order, sz, num_tsb_entries;
 182        struct page *page;
 183
 184        num_tsb_entries = tsbsize / sizeof(iopte_t);
 185
 186        /* Setup initial software IOMMU state. */
 187        spin_lock_init(&iommu->lock);
 188        iommu->ctx_lowest_free = 1;
 189        iommu->page_table_map_base = dma_offset;
 190        iommu->dma_addr_mask = dma_addr_mask;
 191
 192        /* Allocate and initialize the free area map.  */
 193        sz = num_tsb_entries / 8;
 194        sz = (sz + 7UL) & ~7UL;
 195        iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
 196        if (!iommu->arena.map) {
 197                printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
 198                return -ENOMEM;
 199        }
 200        memset(iommu->arena.map, 0, sz);
 201        iommu->arena.limit = num_tsb_entries;
 202
 203        if (tlb_type != hypervisor)
 204                iommu->flush_all = iommu_flushall;
 205
 206        /* Allocate and initialize the dummy page which we
 207         * set inactive IO PTEs to point to.
 208         */
 209        page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
 210        if (!page) {
 211                printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
 212                goto out_free_map;
 213        }
 214        iommu->dummy_page = (unsigned long) page_address(page);
 215        memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
 216        iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
 217
 218        /* Now allocate and setup the IOMMU page table itself.  */
 219        order = get_order(tsbsize);
 220        page = alloc_pages_node(numa_node, GFP_KERNEL, order);
 221        if (!page) {
 222                printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
 223                goto out_free_dummy_page;
 224        }
 225        iommu->page_table = (iopte_t *)page_address(page);
 226
 227        for (i = 0; i < num_tsb_entries; i++)
 228                iopte_make_dummy(iommu, &iommu->page_table[i]);
 229
 230        return 0;
 231
 232out_free_dummy_page:
 233        free_page(iommu->dummy_page);
 234        iommu->dummy_page = 0UL;
 235
 236out_free_map:
 237        kfree(iommu->arena.map);
 238        iommu->arena.map = NULL;
 239
 240        return -ENOMEM;
 241}
 242
 243static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
 244                                    unsigned long npages)
 245{
 246        unsigned long entry;
 247
 248        entry = iommu_range_alloc(dev, iommu, npages, NULL);
 249        if (unlikely(entry == DMA_ERROR_CODE))
 250                return NULL;
 251
 252        return iommu->page_table + entry;
 253}
 254
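/* Allocate a DMA context number from the context bitmap, searching
 * upward from the lowest slot that might be free and wrapping around
 * once.  Context 0 is never handed out; it means "no context" and is
 * what we fall back to when the bitmap is exhausted.
 */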
 255static int iommu_alloc_ctx(struct iommu *iommu)
 256{
 257        int lowest = iommu->ctx_lowest_free;
 258        int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);
 259
 260        if (unlikely(n == IOMMU_NUM_CTXS)) {
 261                n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
 262                if (unlikely(n == lowest)) {
 263                        printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
 264                        n = 0;
 265                }
 266        }
 267        if (n)
 268                __set_bit(n, iommu->ctx_bitmap);
 269
 270        return n;
 271}
 272
 273static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
 274{
 275        if (likely(ctx)) {
 276                __clear_bit(ctx, iommu->ctx_bitmap);
 277                if (ctx < iommu->ctx_lowest_free)
 278                        iommu->ctx_lowest_free = ctx;
 279        }
 280}
 281
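/* Allocate a physically contiguous buffer and map it consistent (and
 * writable) in the IOMMU.  The return value is the CPU virtual address
 * of the buffer; *dma_addrp receives the bus address derived from the
 * IOPTE slots that were allocated for it.
 */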
 282static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
 283                                   dma_addr_t *dma_addrp, gfp_t gfp)
 284{
 285        unsigned long flags, order, first_page;
 286        struct iommu *iommu;
 287        struct page *page;
 288        int npages, nid;
 289        iopte_t *iopte;
 290        void *ret;
 291
 292        size = IO_PAGE_ALIGN(size);
 293        order = get_order(size);
 294        if (order >= 10)
 295                return NULL;
 296
 297        nid = dev->archdata.numa_node;
 298        page = alloc_pages_node(nid, gfp, order);
 299        if (unlikely(!page))
 300                return NULL;
 301
 302        first_page = (unsigned long) page_address(page);
 303        memset((char *)first_page, 0, PAGE_SIZE << order);
 304
 305        iommu = dev->archdata.iommu;
 306
 307        spin_lock_irqsave(&iommu->lock, flags);
 308        iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
 309        spin_unlock_irqrestore(&iommu->lock, flags);
 310
 311        if (unlikely(iopte == NULL)) {
 312                free_pages(first_page, order);
 313                return NULL;
 314        }
 315
 316        *dma_addrp = (iommu->page_table_map_base +
 317                      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
 318        ret = (void *) first_page;
 319        npages = size >> IO_PAGE_SHIFT;
 320        first_page = __pa(first_page);
 321        while (npages--) {
 322                iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
 323                                     IOPTE_WRITE |
 324                                     (first_page & IOPTE_PAGE));
 325                iopte++;
 326                first_page += IO_PAGE_SIZE;
 327        }
 328
 329        return ret;
 330}
 331
 332static void dma_4u_free_coherent(struct device *dev, size_t size,
 333                                 void *cpu, dma_addr_t dvma)
 334{
 335        struct iommu *iommu;
 336        iopte_t *iopte;
 337        unsigned long flags, order, npages;
 338
 339        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
 340        iommu = dev->archdata.iommu;
 341        iopte = iommu->page_table +
 342                ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
 343
 344        spin_lock_irqsave(&iommu->lock, flags);
 345
 346        iommu_range_free(iommu, dvma, npages);
 347
 348        spin_unlock_irqrestore(&iommu->lock, flags);
 349
 350        order = get_order(size);
 351        if (order < 10)
 352                free_pages((unsigned long)cpu, order);
 353}
 354
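/* Map a single page (or part of one) for streaming DMA.  We allocate
 * enough IOPTEs to cover the buffer, grab a flush context if the IOMMU
 * supports context flushing, and program either streaming or consistent
 * protection depending on whether a streaming buffer is enabled.  The
 * returned bus address preserves the original offset within the page.
 */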
 355static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
 356                                  unsigned long offset, size_t sz,
 357                                  enum dma_data_direction direction,
 358                                  struct dma_attrs *attrs)
 359{
 360        struct iommu *iommu;
 361        struct strbuf *strbuf;
 362        iopte_t *base;
 363        unsigned long flags, npages, oaddr;
 364        unsigned long i, base_paddr, ctx;
 365        u32 bus_addr, ret;
 366        unsigned long iopte_protection;
 367
 368        iommu = dev->archdata.iommu;
 369        strbuf = dev->archdata.stc;
 370
 371        if (unlikely(direction == DMA_NONE))
 372                goto bad_no_ctx;
 373
 374        oaddr = (unsigned long)(page_address(page) + offset);
 375        npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
 376        npages >>= IO_PAGE_SHIFT;
 377
 378        spin_lock_irqsave(&iommu->lock, flags);
 379        base = alloc_npages(dev, iommu, npages);
 380        ctx = 0;
 381        if (iommu->iommu_ctxflush)
 382                ctx = iommu_alloc_ctx(iommu);
 383        spin_unlock_irqrestore(&iommu->lock, flags);
 384
 385        if (unlikely(!base))
 386                goto bad;
 387
 388        bus_addr = (iommu->page_table_map_base +
 389                    ((base - iommu->page_table) << IO_PAGE_SHIFT));
 390        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
 391        base_paddr = __pa(oaddr & IO_PAGE_MASK);
 392        if (strbuf->strbuf_enabled)
 393                iopte_protection = IOPTE_STREAMING(ctx);
 394        else
 395                iopte_protection = IOPTE_CONSISTENT(ctx);
 396        if (direction != DMA_TO_DEVICE)
 397                iopte_protection |= IOPTE_WRITE;
 398
 399        for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
 400                iopte_val(*base) = iopte_protection | base_paddr;
 401
 402        return ret;
 403
 404bad:
 405        iommu_free_ctx(iommu, ctx);
 406bad_no_ctx:
 407        if (printk_ratelimit())
 408                WARN_ON(1);
 409        return DMA_ERROR_CODE;
 410}
 411
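/* Push dirty data out of the streaming buffer for a range of IO pages.
 * When both the IOMMU and the streaming buffer support context flushing
 * we flush by context and poll the match register, falling back to
 * page-by-page flushes otherwise (or if the context flush times out).
 * Finally, unless the mapping was DMA_TO_DEVICE only, we arm the flush
 * flag and spin (with a bounded timeout) until the hardware sets it.
 */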
 412static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
 413                         u32 vaddr, unsigned long ctx, unsigned long npages,
 414                         enum dma_data_direction direction)
 415{
 416        int limit;
 417
 418        if (strbuf->strbuf_ctxflush &&
 419            iommu->iommu_ctxflush) {
 420                unsigned long matchreg, flushreg;
 421                u64 val;
 422
 423                flushreg = strbuf->strbuf_ctxflush;
 424                matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);
 425
 426                iommu_write(flushreg, ctx);
 427                val = iommu_read(matchreg);
 428                val &= 0xffff;
 429                if (!val)
 430                        goto do_flush_sync;
 431
 432                while (val) {
 433                        if (val & 0x1)
 434                                iommu_write(flushreg, ctx);
 435                        val >>= 1;
 436                }
 437                val = iommu_read(matchreg);
 438                if (unlikely(val)) {
 439                        printk(KERN_WARNING "strbuf_flush: ctx flush "
 440                               "timeout matchreg[%llx] ctx[%lx]\n",
 441                               val, ctx);
 442                        goto do_page_flush;
 443                }
 444        } else {
 445                unsigned long i;
 446
 447        do_page_flush:
 448                for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
 449                        iommu_write(strbuf->strbuf_pflush, vaddr);
 450        }
 451
 452do_flush_sync:
 453        /* If the device could not have possibly put dirty data into
 454         * the streaming cache, no flush-flag synchronization needs
 455         * to be performed.
 456         */
 457        if (direction == DMA_TO_DEVICE)
 458                return;
 459
 460        STC_FLUSHFLAG_INIT(strbuf);
 461        iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
 462        (void) iommu_read(iommu->write_complete_reg);
 463
 464        limit = 100000;
 465        while (!STC_FLUSHFLAG_SET(strbuf)) {
 466                limit--;
 467                if (!limit)
 468                        break;
 469                udelay(1);
 470                rmb();
 471        }
 472        if (!limit)
 473                printk(KERN_WARNING "strbuf_flush: flushflag timeout "
 474                       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
 475                       vaddr, ctx, npages);
 476}
 477
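/* Tear down a mapping made by dma_4u_map_page(): flush the streaming
 * buffer if one is in use, point the IOPTEs back at the dummy page,
 * and release both the arena range and the flush context.
 */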
 478static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
 479                              size_t sz, enum dma_data_direction direction,
 480                              struct dma_attrs *attrs)
 481{
 482        struct iommu *iommu;
 483        struct strbuf *strbuf;
 484        iopte_t *base;
 485        unsigned long flags, npages, ctx, i;
 486
 487        if (unlikely(direction == DMA_NONE)) {
 488                if (printk_ratelimit())
 489                        WARN_ON(1);
 490                return;
 491        }
 492
 493        iommu = dev->archdata.iommu;
 494        strbuf = dev->archdata.stc;
 495
 496        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
 497        npages >>= IO_PAGE_SHIFT;
 498        base = iommu->page_table +
 499                ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
 500        bus_addr &= IO_PAGE_MASK;
 501
 502        spin_lock_irqsave(&iommu->lock, flags);
 503
 504        /* Record the context, if any. */
 505        ctx = 0;
 506        if (iommu->iommu_ctxflush)
 507                ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
 508
 509        /* Step 1: Kick data out of streaming buffers if necessary. */
 510        if (strbuf->strbuf_enabled)
 511                strbuf_flush(strbuf, iommu, bus_addr, ctx,
 512                             npages, direction);
 513
 514        /* Step 2: Clear out TSB entries. */
 515        for (i = 0; i < npages; i++)
 516                iopte_make_dummy(iommu, base + i);
 517
 518        iommu_range_free(iommu, bus_addr, npages);
 519
 520        iommu_free_ctx(iommu, ctx);
 521
 522        spin_unlock_irqrestore(&iommu->lock, flags);
 523}
 524
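/* Map a scatterlist.  Every element gets its own IOPTE allocation, but
 * elements whose bus addresses come out contiguous are merged into a
 * single DMA segment as long as the merged length stays within the
 * device's maximum segment size and does not cross its segment
 * boundary.  Returns the number of DMA segments produced, or 0 on
 * failure after undoing any partial mappings.
 */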
 525static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 526                         int nelems, enum dma_data_direction direction,
 527                         struct dma_attrs *attrs)
 528{
 529        struct scatterlist *s, *outs, *segstart;
 530        unsigned long flags, handle, prot, ctx;
 531        dma_addr_t dma_next = 0, dma_addr;
 532        unsigned int max_seg_size;
 533        unsigned long seg_boundary_size;
 534        int outcount, incount, i;
 535        struct strbuf *strbuf;
 536        struct iommu *iommu;
 537        unsigned long base_shift;
 538
 539        BUG_ON(direction == DMA_NONE);
 540
 541        iommu = dev->archdata.iommu;
 542        strbuf = dev->archdata.stc;
 543        if (nelems == 0 || !iommu)
 544                return 0;
 545
 546        spin_lock_irqsave(&iommu->lock, flags);
 547
 548        ctx = 0;
 549        if (iommu->iommu_ctxflush)
 550                ctx = iommu_alloc_ctx(iommu);
 551
 552        if (strbuf->strbuf_enabled)
 553                prot = IOPTE_STREAMING(ctx);
 554        else
 555                prot = IOPTE_CONSISTENT(ctx);
 556        if (direction != DMA_TO_DEVICE)
 557                prot |= IOPTE_WRITE;
 558
 559        outs = s = segstart = &sglist[0];
 560        outcount = 1;
 561        incount = nelems;
 562        handle = 0;
 563
 564        /* Init first segment length for backout at failure */
 565        outs->dma_length = 0;
 566
 567        max_seg_size = dma_get_max_seg_size(dev);
 568        seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
 569                                  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
 570        base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
 571        for_each_sg(sglist, s, nelems, i) {
 572                unsigned long paddr, npages, entry, out_entry = 0, slen;
 573                iopte_t *base;
 574
 575                slen = s->length;
 576                /* Sanity check */
 577                if (slen == 0) {
 578                        dma_next = 0;
 579                        continue;
 580                }
 581                /* Allocate iommu entries for that segment */
 582                paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
 583                npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
 584                entry = iommu_range_alloc(dev, iommu, npages, &handle);
 585
 586                /* Handle failure */
 587                if (unlikely(entry == DMA_ERROR_CODE)) {
 588                        if (printk_ratelimit())
 589                                printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
 590                                       " npages %lx\n", iommu, paddr, npages);
 591                        goto iommu_map_failed;
 592                }
 593
 594                base = iommu->page_table + entry;
 595
 596                /* Convert entry to a dma_addr_t */
 597                dma_addr = iommu->page_table_map_base +
 598                        (entry << IO_PAGE_SHIFT);
 599                dma_addr |= (s->offset & ~IO_PAGE_MASK);
 600
 601                /* Insert into HW table */
 602                paddr &= IO_PAGE_MASK;
 603                while (npages--) {
 604                        iopte_val(*base) = prot | paddr;
 605                        base++;
 606                        paddr += IO_PAGE_SIZE;
 607                }
 608
 609                /* If we are in an open segment, try merging */
 610                if (segstart != s) {
 611                        /* We cannot merge if:
 612                         * - allocated dma_addr isn't contiguous to previous allocation
 613                         */
 614                        if ((dma_addr != dma_next) ||
 615                            (outs->dma_length + s->length > max_seg_size) ||
 616                            (is_span_boundary(out_entry, base_shift,
 617                                              seg_boundary_size, outs, s))) {
 618                                /* Can't merge: create a new segment */
 619                                segstart = s;
 620                                outcount++;
 621                                outs = sg_next(outs);
 622                        } else {
 623                                outs->dma_length += s->length;
 624                        }
 625                }
 626
 627                if (segstart == s) {
 628                        /* This is a new segment, fill entries */
 629                        outs->dma_address = dma_addr;
 630                        outs->dma_length = slen;
 631                        out_entry = entry;
 632                }
 633
 634                /* Calculate next page pointer for contiguous check */
 635                dma_next = dma_addr + slen;
 636        }
 637
 638        spin_unlock_irqrestore(&iommu->lock, flags);
 639
 640        if (outcount < incount) {
 641                outs = sg_next(outs);
 642                outs->dma_address = DMA_ERROR_CODE;
 643                outs->dma_length = 0;
 644        }
 645
 646        return outcount;
 647
 648iommu_map_failed:
 649        for_each_sg(sglist, s, nelems, i) {
 650                if (s->dma_length != 0) {
 651                        unsigned long vaddr, npages, entry, j;
 652                        iopte_t *base;
 653
 654                        vaddr = s->dma_address & IO_PAGE_MASK;
 655                        npages = iommu_num_pages(s->dma_address, s->dma_length,
 656                                                 IO_PAGE_SIZE);
 657                        iommu_range_free(iommu, vaddr, npages);
 658
 659                        entry = (vaddr - iommu->page_table_map_base)
 660                                >> IO_PAGE_SHIFT;
 661                        base = iommu->page_table + entry;
 662
 663                        for (j = 0; j < npages; j++)
 664                                iopte_make_dummy(iommu, base + j);
 665
 666                        s->dma_address = DMA_ERROR_CODE;
 667                        s->dma_length = 0;
 668                }
 669                if (s == outs)
 670                        break;
 671        }
 672        spin_unlock_irqrestore(&iommu->lock, flags);
 673
 674        return 0;
 675}
 676
 677/* If contexts are being used, they are the same in all of the mappings
 678 * we make for a particular SG.
 679 */
 680static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
 681{
 682        unsigned long ctx = 0;
 683
 684        if (iommu->iommu_ctxflush) {
 685                iopte_t *base;
 686                u32 bus_addr;
 687
 688                bus_addr = sg->dma_address & IO_PAGE_MASK;
 689                base = iommu->page_table +
 690                        ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
 691
 692                ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
 693        }
 694        return ctx;
 695}
 696
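/* Unmap a scatterlist previously mapped by dma_4u_map_sg().  The flush
 * context is recovered from the first segment's IOPTE; each segment is
 * then flushed (when streaming is enabled), pointed back at the dummy
 * page and returned to the arena.
 */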
 697static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
 698                            int nelems, enum dma_data_direction direction,
 699                            struct dma_attrs *attrs)
 700{
 701        unsigned long flags, ctx;
 702        struct scatterlist *sg;
 703        struct strbuf *strbuf;
 704        struct iommu *iommu;
 705
 706        BUG_ON(direction == DMA_NONE);
 707
 708        iommu = dev->archdata.iommu;
 709        strbuf = dev->archdata.stc;
 710
 711        ctx = fetch_sg_ctx(iommu, sglist);
 712
 713        spin_lock_irqsave(&iommu->lock, flags);
 714
 715        sg = sglist;
 716        while (nelems--) {
 717                dma_addr_t dma_handle = sg->dma_address;
 718                unsigned int len = sg->dma_length;
 719                unsigned long npages, entry;
 720                iopte_t *base;
 721                int i;
 722
 723                if (!len)
 724                        break;
 725                npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
 726                iommu_range_free(iommu, dma_handle, npages);
 727
 728                entry = ((dma_handle - iommu->page_table_map_base)
 729                         >> IO_PAGE_SHIFT);
 730                base = iommu->page_table + entry;
 731
 732                dma_handle &= IO_PAGE_MASK;
 733                if (strbuf->strbuf_enabled)
 734                        strbuf_flush(strbuf, iommu, dma_handle, ctx,
 735                                     npages, direction);
 736
 737                for (i = 0; i < npages; i++)
 738                        iopte_make_dummy(iommu, base + i);
 739
 740                sg = sg_next(sg);
 741        }
 742
 743        iommu_free_ctx(iommu, ctx);
 744
 745        spin_unlock_irqrestore(&iommu->lock, flags);
 746}
 747
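/* Make device writes visible to the CPU for a single streaming mapping.
 * This only matters when a streaming buffer is present; we recover the
 * context from the mapping's IOPTE and flush the pages it covers.
 */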
 748static void dma_4u_sync_single_for_cpu(struct device *dev,
 749                                       dma_addr_t bus_addr, size_t sz,
 750                                       enum dma_data_direction direction)
 751{
 752        struct iommu *iommu;
 753        struct strbuf *strbuf;
 754        unsigned long flags, ctx, npages;
 755
 756        iommu = dev->archdata.iommu;
 757        strbuf = dev->archdata.stc;
 758
 759        if (!strbuf->strbuf_enabled)
 760                return;
 761
 762        spin_lock_irqsave(&iommu->lock, flags);
 763
 764        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
 765        npages >>= IO_PAGE_SHIFT;
 766        bus_addr &= IO_PAGE_MASK;
 767
 768        /* Step 1: Record the context, if any. */
 769        ctx = 0;
 770        if (iommu->iommu_ctxflush &&
 771            strbuf->strbuf_ctxflush) {
 772                iopte_t *iopte;
 773
 774                iopte = iommu->page_table +
 775                        ((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
 776                ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
 777        }
 778
 779        /* Step 2: Kick data out of streaming buffers. */
 780        strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 781
 782        spin_unlock_irqrestore(&iommu->lock, flags);
 783}
 784
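/* Scatterlist counterpart of the single-mapping sync: walk the list to
 * find the last segment that was actually mapped, then flush everything
 * from the first segment's bus address to the end of that segment.
 */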
 785static void dma_4u_sync_sg_for_cpu(struct device *dev,
 786                                   struct scatterlist *sglist, int nelems,
 787                                   enum dma_data_direction direction)
 788{
 789        struct iommu *iommu;
 790        struct strbuf *strbuf;
 791        unsigned long flags, ctx, npages, i;
 792        struct scatterlist *sg, *sgprv;
 793        u32 bus_addr;
 794
 795        iommu = dev->archdata.iommu;
 796        strbuf = dev->archdata.stc;
 797
 798        if (!strbuf->strbuf_enabled)
 799                return;
 800
 801        spin_lock_irqsave(&iommu->lock, flags);
 802
 803        /* Step 1: Record the context, if any. */
 804        ctx = 0;
 805        if (iommu->iommu_ctxflush &&
 806            strbuf->strbuf_ctxflush) {
 807                iopte_t *iopte;
 808
 809                iopte = iommu->page_table +
 810                        ((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
 811                ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
 812        }
 813
 814        /* Step 2: Kick data out of streaming buffers. */
 815        bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
 816        sgprv = NULL;
 817        for_each_sg(sglist, sg, nelems, i) {
 818                if (sg->dma_length == 0)
 819                        break;
 820                sgprv = sg;
 821        }
 822
 823        npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
 824                  - bus_addr) >> IO_PAGE_SHIFT;
 825        strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 826
 827        spin_unlock_irqrestore(&iommu->lock, flags);
 828}
 829
 830static struct dma_map_ops sun4u_dma_ops = {
 831        .alloc_coherent         = dma_4u_alloc_coherent,
 832        .free_coherent          = dma_4u_free_coherent,
 833        .map_page               = dma_4u_map_page,
 834        .unmap_page             = dma_4u_unmap_page,
 835        .map_sg                 = dma_4u_map_sg,
 836        .unmap_sg               = dma_4u_unmap_sg,
 837        .sync_single_for_cpu    = dma_4u_sync_single_for_cpu,
 838        .sync_sg_for_cpu        = dma_4u_sync_sg_for_cpu,
 839};
 840
 841struct dma_map_ops *dma_ops = &sun4u_dma_ops;
 842EXPORT_SYMBOL(dma_ops);
 843
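/* Illustrative sketch only (not part of this file): drivers do not call
 * the dma_4u_* routines directly, they reach them through the generic
 * DMA API via the dma_ops pointer exported above.  "pdev", "buf" and
 * "len" below are hypothetical driver-side variables.
 *
 *	dma_addr_t busa;
 *
 *	busa = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, busa))
 *		return -ENOMEM;
 *	... start the device DMA at busa, wait for it to finish ...
 *	dma_unmap_single(&pdev->dev, busa, len, DMA_TO_DEVICE);
 */
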
 844extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
 845
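/* Report whether a device with the given DMA mask can be driven through
 * this IOMMU.  Masks wider than 32 bits are rejected outright; otherwise
 * the mask must cover the IOMMU's own DMA address mask, with a PCI bus
 * specific check as the final fallback.
 */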
 846int dma_supported(struct device *dev, u64 device_mask)
 847{
 848        struct iommu *iommu = dev->archdata.iommu;
 849        u64 dma_addr_mask = iommu->dma_addr_mask;
 850
 851        if (device_mask >= (1UL << 32UL))
 852                return 0;
 853
 854        if ((device_mask & dma_addr_mask) == dma_addr_mask)
 855                return 1;
 856
 857#ifdef CONFIG_PCI
 858        if (dev->bus == &pci_bus_type)
 859                return pci64_dma_supported(to_pci_dev(dev), device_mask);
 860#endif
 861
 862        return 0;
 863}
 864EXPORT_SYMBOL(dma_supported);
 865