linux/drivers/iommu/dma-iommu.c
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

#define IOMMU_MAPPING_ERROR     0

struct iommu_dma_msi_page {
        struct list_head        list;
        dma_addr_t              iova;
        phys_addr_t             phys;
};

enum iommu_dma_cookie_type {
        IOMMU_DMA_IOVA_COOKIE,
        IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
        enum iommu_dma_cookie_type      type;
        union {
                /* Full allocator for IOMMU_DMA_IOVA_COOKIE */
                struct iova_domain      iovad;
                /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
                dma_addr_t              msi_iova;
        };
        struct list_head                msi_page_list;
        spinlock_t                      msi_lock;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
        if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
                return cookie->iovad.granule;
        return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
        struct iommu_dma_cookie *cookie;

        cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
        if (cookie) {
                spin_lock_init(&cookie->msi_lock);
                INIT_LIST_HEAD(&cookie->msi_page_list);
                cookie->type = type;
        }
        return cookie;
}

int iommu_dma_init(void)
{
        return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
        if (domain->iova_cookie)
                return -EEXIST;

        domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
        if (!domain->iova_cookie)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
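
/*
 * Example (illustrative sketch, not part of this file): an IOMMU driver's
 * domain_alloc() callback would attach the DMA cookie to a newly created DMA
 * domain roughly like this. The my_domain/my_domain_alloc() names are
 * hypothetical.
 *
 *        static struct iommu_domain *my_domain_alloc(unsigned type)
 *        {
 *                struct my_domain *dom;
 *
 *                if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
 *                        return NULL;
 *
 *                dom = kzalloc(sizeof(*dom), GFP_KERNEL);
 *                if (!dom)
 *                        return NULL;
 *
 *                if (type == IOMMU_DOMAIN_DMA &&
 *                    iommu_get_dma_cookie(&dom->domain)) {
 *                        kfree(dom);
 *                        return NULL;
 *                }
 *                return &dom->domain;
 *        }
 */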

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
        struct iommu_dma_cookie *cookie;

        if (domain->type != IOMMU_DOMAIN_UNMANAGED)
                return -EINVAL;

        if (domain->iova_cookie)
                return -EEXIST;

        cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
        if (!cookie)
                return -ENOMEM;

        cookie->msi_iova = base;
        domain->iova_cookie = cookie;
        return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
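
/*
 * Example (illustrative sketch): a user along the lines of VFIO, which works
 * with an IOMMU_DOMAIN_UNMANAGED domain and handles IOVA allocation itself,
 * would reserve an IOVA window for software MSI remapping and then do:
 *
 *        ret = iommu_get_msi_cookie(domain, resv_msi_base);
 *
 * where resv_msi_base is the start of that reserved window.
 */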

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iommu_dma_msi_page *msi, *tmp;

        if (!cookie)
                return;

        if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
                put_iova_domain(&cookie->iovad);

        list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
                list_del(&msi->list);
                kfree(msi);
        }
        kfree(cookie);
        domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
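
/*
 * Example (illustrative sketch): the matching domain_free() callback just
 * releases the cookie before freeing the driver's own state; calling it on a
 * domain that never had a cookie attached is harmless. to_my_domain() is a
 * hypothetical container_of() wrapper.
 *
 *        static void my_domain_free(struct iommu_domain *domain)
 *        {
 *                iommu_put_dma_cookie(domain);
 *                kfree(to_my_domain(domain));
 *        }
 */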

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI-based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
        if (!is_of_node(dev->iommu_fwspec->iommu_fwnode))
                iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
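
/*
 * Example (illustrative sketch, loosely modelled on how SMMU drivers of this
 * era wire things up): a driver's .get_resv_regions callback typically adds
 * its own software MSI window and then calls the helper above for the generic
 * reservations. MY_MSI_IOVA_BASE/MY_MSI_IOVA_LENGTH are hypothetical,
 * driver-defined constants.
 *
 *        static void my_get_resv_regions(struct device *dev,
 *                                        struct list_head *head)
 *        {
 *                struct iommu_resv_region *region;
 *                int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
 *
 *                region = iommu_alloc_resv_region(MY_MSI_IOVA_BASE,
 *                                                 MY_MSI_IOVA_LENGTH, prot,
 *                                                 IOMMU_RESV_SW_MSI);
 *                if (!region)
 *                        return;
 *                list_add_tail(&region->list, head);
 *                iommu_dma_get_resv_regions(dev, head);
 *        }
 */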

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
                phys_addr_t start, phys_addr_t end)
{
        struct iova_domain *iovad = &cookie->iovad;
        struct iommu_dma_msi_page *msi_page;
        int i, num_pages;

        start -= iova_offset(iovad, start);
        num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

        msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
        if (!msi_page)
                return -ENOMEM;

        for (i = 0; i < num_pages; i++) {
                msi_page[i].phys = start;
                msi_page[i].iova = start;
                INIT_LIST_HEAD(&msi_page[i].list);
                list_add(&msi_page[i].list, &cookie->msi_page_list);
                start += iovad->granule;
        }

        return 0;
}

static void iova_reserve_pci_windows(struct pci_dev *dev,
                struct iova_domain *iovad)
{
        struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
        struct resource_entry *window;
        unsigned long lo, hi;

        resource_list_for_each_entry(window, &bridge->windows) {
                if (resource_type(window->res) != IORESOURCE_MEM)
                        continue;

                lo = iova_pfn(iovad, window->res->start - window->offset);
                hi = iova_pfn(iovad, window->res->end - window->offset);
                reserve_iova(iovad, lo, hi);
        }
}

static int iova_reserve_iommu_regions(struct device *dev,
                struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        struct iommu_resv_region *region;
        LIST_HEAD(resv_regions);
        int ret = 0;

        if (dev_is_pci(dev))
                iova_reserve_pci_windows(to_pci_dev(dev), iovad);

        iommu_get_resv_regions(dev, &resv_regions);
        list_for_each_entry(region, &resv_regions, list) {
                unsigned long lo, hi;

                /* We ARE the software that manages these! */
                if (region->type == IOMMU_RESV_SW_MSI)
                        continue;

                lo = iova_pfn(iovad, region->start);
                hi = iova_pfn(iovad, region->start + region->length - 1);
                reserve_iova(iovad, lo, hi);

                if (region->type == IOMMU_RESV_MSI)
                        ret = cookie_init_hw_msi_region(cookie, region->start,
                                        region->start + region->length);
                if (ret)
                        break;
        }
        iommu_put_resv_regions(dev, &resv_regions);

        return ret;
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
                u64 size, struct device *dev)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        unsigned long order, base_pfn, end_pfn;

        if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
                return -EINVAL;

        /* Use the smallest supported page size for IOVA granularity */
        order = __ffs(domain->pgsize_bitmap);
        base_pfn = max_t(unsigned long, 1, base >> order);
        end_pfn = (base + size - 1) >> order;

        /* Check the domain allows at least some access to the device... */
        if (domain->geometry.force_aperture) {
                if (base > domain->geometry.aperture_end ||
                    base + size <= domain->geometry.aperture_start) {
                        pr_warn("specified DMA range outside IOMMU capability\n");
                        return -EFAULT;
                }
                /* ...then finally give it a kicking to make sure it fits */
                base_pfn = max_t(unsigned long, base_pfn,
                                domain->geometry.aperture_start >> order);
        }

        /* start_pfn is always nonzero for an already-initialised domain */
        if (iovad->start_pfn) {
                if (1UL << order != iovad->granule ||
                    base_pfn != iovad->start_pfn) {
                        pr_warn("Incompatible range for DMA domain\n");
                        return -EFAULT;
                }

                return 0;
        }

        init_iova_domain(iovad, 1UL << order, base_pfn);
        if (!dev)
                return 0;

        return iova_reserve_iommu_regions(dev, domain);
}
EXPORT_SYMBOL(iommu_dma_init_domain);
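
/*
 * Example (illustrative sketch): arch code that installs IOMMU-backed DMA ops
 * for a device typically fetches the default domain and initialises it with
 * the DMA window reported by firmware. Names and error handling are
 * simplified; my_iommu_dma_ops is a hypothetical dma_map_ops wrapper (see the
 * sketch near the end of this file).
 *
 *        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 *
 *        if (domain && domain->type == IOMMU_DOMAIN_DMA &&
 *            !iommu_dma_init_domain(domain, dma_base, dma_size, dev))
 *                dev->dma_ops = &my_iommu_dma_ops;
 */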

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
                     unsigned long attrs)
{
        int prot = coherent ? IOMMU_CACHE : 0;

        if (attrs & DMA_ATTR_PRIVILEGED)
                prot |= IOMMU_PRIV;

        switch (dir) {
        case DMA_BIDIRECTIONAL:
                return prot | IOMMU_READ | IOMMU_WRITE;
        case DMA_TO_DEVICE:
                return prot | IOMMU_READ;
        case DMA_FROM_DEVICE:
                return prot | IOMMU_WRITE;
        default:
                return 0;
        }
}
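
/*
 * For example, a privileged DMA_FROM_DEVICE transfer for a non-coherent
 * device resolves to a write-only, privileged mapping with no IOMMU_CACHE
 * (since the master is not coherent):
 *
 *        int prot = dma_info_to_prot(DMA_FROM_DEVICE, false,
 *                                    DMA_ATTR_PRIVILEGED);
 *        // prot == (IOMMU_WRITE | IOMMU_PRIV)
 */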

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
                size_t size, dma_addr_t dma_limit, struct device *dev)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        unsigned long shift, iova_len, iova = 0;

        if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
                cookie->msi_iova += size;
                return cookie->msi_iova - size;
        }

        shift = iova_shift(iovad);
        iova_len = size >> shift;
        /*
         * Freeing non-power-of-two-sized allocations back into the IOVA caches
         * will come back to bite us badly, so we have to waste a bit of space
         * rounding up anything cacheable to make sure that can't happen. The
         * order of the unadjusted size will still match upon freeing.
         */
        if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
                iova_len = roundup_pow_of_two(iova_len);

        if (domain->geometry.force_aperture)
                dma_limit = min(dma_limit, domain->geometry.aperture_end);

        /* Try to get PCI devices a SAC address */
        if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
                iova = alloc_iova_fast(iovad, iova_len,
                                       DMA_BIT_MASK(32) >> shift, false);

        if (!iova)
                iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
                                       true);

        return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
                dma_addr_t iova, size_t size)
{
        struct iova_domain *iovad = &cookie->iovad;

        /* The MSI case is only ever cleaning up its most recent allocation */
        if (cookie->type == IOMMU_DMA_MSI_COOKIE)
                cookie->msi_iova -= size;
        else
                free_iova_fast(iovad, iova_pfn(iovad, iova),
                                size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
                size_t size)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        size_t iova_off = iova_offset(iovad, dma_addr);

        dma_addr -= iova_off;
        size = iova_align(iovad, size + iova_off);

        WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
        iommu_dma_free_iova(cookie, dma_addr, size);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
        while (count--)
                __free_page(pages[count]);
        kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(unsigned int count,
                unsigned long order_mask, gfp_t gfp)
{
        struct page **pages;
        unsigned int i = 0, array_size = count * sizeof(*pages);

        order_mask &= (2U << MAX_ORDER) - 1;
        if (!order_mask)
                return NULL;

        if (array_size <= PAGE_SIZE)
                pages = kzalloc(array_size, GFP_KERNEL);
        else
                pages = vzalloc(array_size);
        if (!pages)
                return NULL;

        /* IOMMU can map any pages, so highmem can also be used here */
        gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

        while (count) {
                struct page *page = NULL;
                unsigned int order_size;

                /*
                 * Higher-order allocations are a convenience rather
                 * than a necessity, hence using __GFP_NORETRY until
                 * falling back to minimum-order allocations.
                 */
                for (order_mask &= (2U << __fls(count)) - 1;
                     order_mask; order_mask &= ~order_size) {
                        unsigned int order = __fls(order_mask);

                        order_size = 1U << order;
                        page = alloc_pages((order_mask - order_size) ?
                                           gfp | __GFP_NORETRY : gfp, order);
                        if (!page)
                                continue;
                        if (!order)
                                break;
                        if (!PageCompound(page)) {
                                split_page(page, order);
                                break;
                        } else if (!split_huge_page(page)) {
                                break;
                        }
                        __free_pages(page, order);
                }
                if (!page) {
                        __iommu_dma_free_pages(pages, i);
                        return NULL;
                }
                count -= order_size;
                while (order_size--)
                        pages[i++] = page++;
        }
        return pages;
}

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
                dma_addr_t *handle)
{
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
        __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
        *handle = IOMMU_MAPPING_ERROR;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *       attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *              given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *         or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
                unsigned long attrs, int prot, dma_addr_t *handle,
                void (*flush_page)(struct device *, const void *, phys_addr_t))
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        struct page **pages;
        struct sg_table sgt;
        dma_addr_t iova;
        unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

        *handle = IOMMU_MAPPING_ERROR;

        min_size = alloc_sizes & -alloc_sizes;
        if (min_size < PAGE_SIZE) {
                min_size = PAGE_SIZE;
                alloc_sizes |= PAGE_SIZE;
        } else {
                size = ALIGN(size, min_size);
        }
        if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
                alloc_sizes = min_size;

        count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
        if (!pages)
                return NULL;

        size = iova_align(iovad, size);
        iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
        if (!iova)
                goto out_free_pages;

        if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
                goto out_free_iova;

        if (!(prot & IOMMU_CACHE)) {
                struct sg_mapping_iter miter;
                /*
                 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
                 * sufficient here, so skip it by using the "wrong" direction.
                 */
                sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
                while (sg_miter_next(&miter))
                        flush_page(dev, miter.addr, page_to_phys(miter.page));
                sg_miter_stop(&miter);
        }

        if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
                        < size)
                goto out_free_sg;

        *handle = iova;
        sg_free_table(&sgt);
        return pages;

out_free_sg:
        sg_free_table(&sgt);
out_free_iova:
        iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
        __iommu_dma_free_pages(pages, count);
        return NULL;
}
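
/*
 * Example (illustrative sketch): a non-coherent arch allocator might wrap
 * iommu_dma_alloc() roughly like this, passing a page-flushing callback that
 * performs the arch-specific cache maintenance. The my_flush_page() helper
 * and the remapping step are hypothetical/arch-specific.
 *
 *        static void my_flush_page(struct device *dev, const void *virt,
 *                                  phys_addr_t phys)
 *        {
 *                // arch cache maintenance over PAGE_SIZE bytes at virt/phys
 *        }
 *
 *        int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
 *        struct page **pages;
 *
 *        pages = iommu_dma_alloc(dev, size, gfp, attrs, ioprot, &iova,
 *                                my_flush_page);
 *        if (!pages)
 *                return NULL;
 *        // map 'pages' into a contiguous kernel VA and return that; the
 *        // buffer is later torn down with iommu_dma_free(dev, pages, size,
 *        // &iova)
 */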

/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
        unsigned long uaddr = vma->vm_start;
        unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        int ret = -ENXIO;

        for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
                ret = vm_insert_page(vma, uaddr, pages[i]);
                if (ret)
                        break;
                uaddr += PAGE_SIZE;
        }
        return ret;
}
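
/*
 * Example (illustrative sketch): a dma_map_ops .mmap implementation would do
 * the size/offset sanity checks itself before handing the page array over:
 *
 *        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 *
 *        if (vma->vm_pgoff >= nr_pages ||
 *            vma_pages(vma) > nr_pages - vma->vm_pgoff)
 *                return -ENXIO;
 *        return iommu_dma_mmap(pages, size, vma);
 */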

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
                size_t size, int prot)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        size_t iova_off = 0;
        dma_addr_t iova;

        if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
                iova_off = iova_offset(&cookie->iovad, phys);
                size = iova_align(&cookie->iovad, size + iova_off);
        }

        iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
        if (!iova)
                return IOMMU_MAPPING_ERROR;

        if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
                iommu_dma_free_iova(cookie, iova, size);
                return IOMMU_MAPPING_ERROR;
        }
        return iova + iova_off;
}

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, int prot)
{
        return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
                dma_addr_t dma_addr)
{
        struct scatterlist *s, *cur = sg;
        unsigned long seg_mask = dma_get_seg_boundary(dev);
        unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
        int i, count = 0;

        for_each_sg(sg, s, nents, i) {
                /* Restore this segment's original unaligned fields first */
                unsigned int s_iova_off = sg_dma_address(s);
                unsigned int s_length = sg_dma_len(s);
                unsigned int s_iova_len = s->length;

                s->offset += s_iova_off;
                s->length = s_length;
                sg_dma_address(s) = IOMMU_MAPPING_ERROR;
                sg_dma_len(s) = 0;

                /*
                 * Now fill in the real DMA data. If...
                 * - there is a valid output segment to append to
                 * - and this segment starts on an IOVA page boundary
                 * - but doesn't fall at a segment boundary
                 * - and wouldn't make the resulting output segment too long
                 */
                if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
                    (cur_len + s_length <= max_len)) {
                        /* ...then concatenate it with the previous one */
                        cur_len += s_length;
                } else {
                        /* Otherwise start the next output segment */
                        if (i > 0)
                                cur = sg_next(cur);
                        cur_len = s_length;
                        count++;

                        sg_dma_address(cur) = dma_addr + s_iova_off;
                }

                sg_dma_len(cur) = cur_len;
                dma_addr += s_iova_len;

                if (s_length + s_iova_off < s_iova_len)
                        cur_len = 0;
        }
        return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                if (sg_dma_address(s) != IOMMU_MAPPING_ERROR)
                        s->offset += sg_dma_address(s);
                if (sg_dma_len(s))
                        s->length = sg_dma_len(s);
                sg_dma_address(s) = IOMMU_MAPPING_ERROR;
                sg_dma_len(s) = 0;
        }
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
                int nents, int prot)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        struct scatterlist *s, *prev = NULL;
        dma_addr_t iova;
        size_t iova_len = 0;
        unsigned long mask = dma_get_seg_boundary(dev);
        int i;

        /*
         * Work out how much IOVA space we need, and align the segments to
         * IOVA granules for the IOMMU driver to handle. With some clever
         * trickery we can modify the list in-place, but reversibly, by
         * stashing the unaligned parts in the as-yet-unused DMA fields.
         */
        for_each_sg(sg, s, nents, i) {
                size_t s_iova_off = iova_offset(iovad, s->offset);
                size_t s_length = s->length;
                size_t pad_len = (mask - iova_len + 1) & mask;

                sg_dma_address(s) = s_iova_off;
                sg_dma_len(s) = s_length;
                s->offset -= s_iova_off;
                s_length = iova_align(iovad, s_length + s_iova_off);
                s->length = s_length;

                /*
                 * Due to the alignment of our single IOVA allocation, we can
                 * depend on these assumptions about the segment boundary mask:
                 * - If mask size >= IOVA size, then the IOVA range cannot
                 *   possibly fall across a boundary, so we don't care.
                 * - If mask size < IOVA size, then the IOVA range must start
                 *   exactly on a boundary, therefore we can lay things out
                 *   based purely on segment lengths without needing to know
                 *   the actual addresses beforehand.
                 * - The mask must be a power of 2, so pad_len == 0 if
                 *   iova_len == 0, thus we cannot dereference prev the first
                 *   time through here (i.e. before it has a meaningful value).
                 */
                if (pad_len && pad_len < s_length - 1) {
                        prev->length += pad_len;
                        iova_len += pad_len;
                }

                iova_len += s_length;
                prev = s;
        }

        iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
        if (!iova)
                goto out_restore_sg;

        /*
         * We'll leave any physical concatenation to the IOMMU driver's
         * implementation - it knows better than we do.
         */
        if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
                goto out_free_iova;

        return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
        iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
        __invalidate_sg(sg, nents);
        return 0;
}

void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir, unsigned long attrs)
{
        dma_addr_t start, end;
        struct scatterlist *tmp;
        int i;
        /*
         * The scatterlist segments are mapped into a single
         * contiguous IOVA allocation, so this is incredibly easy.
         */
        start = sg_dma_address(sg);
        for_each_sg(sg_next(sg), tmp, nents - 1, i) {
                if (sg_dma_len(tmp) == 0)
                        break;
                sg = tmp;
        }
        end = sg_dma_address(sg) + sg_dma_len(sg);
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start);
}
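
/*
 * Example (illustrative sketch): the exported helpers above are intended to
 * be wrapped by a set of arch dma_map_ops, roughly along these lines. The
 * my_* wrappers are hypothetical; they would add arch cache maintenance for
 * non-coherent devices and translate direction/attrs via dma_info_to_prot().
 *
 *        static const struct dma_map_ops my_iommu_dma_ops = {
 *                .alloc          = my_alloc,       // wraps iommu_dma_alloc()
 *                .free           = my_free,        // wraps iommu_dma_free()
 *                .mmap           = my_mmap,        // wraps iommu_dma_mmap()
 *                .map_page       = my_map_page,    // wraps iommu_dma_map_page()
 *                .unmap_page     = my_unmap_page,  // wraps iommu_dma_unmap_page()
 *                .map_sg         = my_map_sg,      // wraps iommu_dma_map_sg()
 *                .unmap_sg       = my_unmap_sg,    // wraps iommu_dma_unmap_sg()
 *                .map_resource   = iommu_dma_map_resource,
 *                .unmap_resource = iommu_dma_unmap_resource,
 *                .mapping_error  = iommu_dma_mapping_error,
 *        };
 */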

dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        return __iommu_dma_map(dev, phys, size,
                        dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}

void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == IOMMU_MAPPING_ERROR;
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
                phys_addr_t msi_addr, struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iommu_dma_msi_page *msi_page;
        dma_addr_t iova;
        int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
        size_t size = cookie_msi_granule(cookie);

        msi_addr &= ~(phys_addr_t)(size - 1);
        list_for_each_entry(msi_page, &cookie->msi_page_list, list)
                if (msi_page->phys == msi_addr)
                        return msi_page;

        msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
        if (!msi_page)
                return NULL;

        iova = __iommu_dma_map(dev, msi_addr, size, prot);
        if (iommu_dma_mapping_error(dev, iova))
                goto out_free_page;

        INIT_LIST_HEAD(&msi_page->list);
        msi_page->phys = msi_addr;
        msi_page->iova = iova;
        list_add(&msi_page->list, &cookie->msi_page_list);
        return msi_page;

out_free_page:
        kfree(msi_page);
        return NULL;
}

void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
        struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iommu_dma_cookie *cookie;
        struct iommu_dma_msi_page *msi_page;
        phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
        unsigned long flags;

        if (!domain || !domain->iova_cookie)
                return;

        cookie = domain->iova_cookie;

        /*
         * We disable IRQs to rule out a possible inversion against
         * irq_desc_lock if, say, someone tries to retarget the affinity
         * of an MSI from within an IPI handler.
         */
        spin_lock_irqsave(&cookie->msi_lock, flags);
        msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
        spin_unlock_irqrestore(&cookie->msi_lock, flags);

        if (WARN_ON(!msi_page)) {
                /*
                 * We're called from a void callback, so the best we can do is
                 * 'fail' by filling the message with obviously bogus values.
                 * Since we got this far due to an IOMMU being present, it's
                 * not like the existing address would have worked anyway...
                 */
                msg->address_hi = ~0U;
                msg->address_lo = ~0U;
                msg->data = ~0U;
        } else {
                msg->address_hi = upper_32_bits(msi_page->iova);
                msg->address_lo &= cookie_msi_granule(cookie) - 1;
                msg->address_lo += lower_32_bits(msi_page->iova);
        }
}
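
/*
 * Example (illustrative sketch): an MSI irqchip's irq_compose_msi_msg()
 * callback composes the physical doorbell address and then lets the helper
 * above rewrite it with the IOVA when an IOMMU translates the device's
 * writes. The my_* helpers and the doorbell address are hypothetical.
 *
 *        static void my_irq_compose_msi_msg(struct irq_data *d,
 *                                           struct msi_msg *msg)
 *        {
 *                phys_addr_t doorbell = my_get_doorbell(d);
 *
 *                msg->address_lo = lower_32_bits(doorbell);
 *                msg->address_hi = upper_32_bits(doorbell);
 *                msg->data = my_get_event_id(d);
 *
 *                iommu_dma_map_msi_msg(d->irq, msg);
 *        }
 */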