linux/drivers/iommu/dma-iommu.c
   1/*
   2 * A fairly generic DMA-API to IOMMU-API glue layer.
   3 *
   4 * Copyright (C) 2014-2015 ARM Ltd.
   5 *
   6 * based in part on arch/arm/mm/dma-mapping.c:
   7 * Copyright (C) 2000-2004 Russell King
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License version 2 as
  11 * published by the Free Software Foundation.
  12 *
  13 * This program is distributed in the hope that it will be useful,
  14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  16 * GNU General Public License for more details.
  17 *
  18 * You should have received a copy of the GNU General Public License
  19 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  20 */
  21
  22#include <linux/acpi_iort.h>
  23#include <linux/device.h>
  24#include <linux/dma-iommu.h>
  25#include <linux/gfp.h>
  26#include <linux/huge_mm.h>
  27#include <linux/iommu.h>
  28#include <linux/iova.h>
  29#include <linux/irq.h>
  30#include <linux/mm.h>
  31#include <linux/pci.h>
  32#include <linux/scatterlist.h>
  33#include <linux/vmalloc.h>
  34
  35#define IOMMU_MAPPING_ERROR     0
  36
  37struct iommu_dma_msi_page {
  38        struct list_head        list;
  39        dma_addr_t              iova;
  40        phys_addr_t             phys;
  41};
  42
  43enum iommu_dma_cookie_type {
  44        IOMMU_DMA_IOVA_COOKIE,
  45        IOMMU_DMA_MSI_COOKIE,
  46};
  47
  48struct iommu_dma_cookie {
  49        enum iommu_dma_cookie_type      type;
  50        union {
  51                /* Full allocator for IOMMU_DMA_IOVA_COOKIE */
  52                struct iova_domain      iovad;
  53                /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
  54                dma_addr_t              msi_iova;
  55        };
  56        struct list_head                msi_page_list;
  57        spinlock_t                      msi_lock;
  58};
  59
  60static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
  61{
  62        if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
  63                return cookie->iovad.granule;
  64        return PAGE_SIZE;
  65}
  66
  67static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
  68{
  69        struct iommu_dma_cookie *cookie;
  70
  71        cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
  72        if (cookie) {
  73                spin_lock_init(&cookie->msi_lock);
  74                INIT_LIST_HEAD(&cookie->msi_page_list);
  75                cookie->type = type;
  76        }
  77        return cookie;
  78}
  79
  80int iommu_dma_init(void)
  81{
  82        return iova_cache_get();
  83}
  84
  85/**
  86 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
  87 * @domain: IOMMU domain to prepare for DMA-API usage
  88 *
  89 * IOMMU drivers should normally call this from their domain_alloc
  90 * callback when domain->type == IOMMU_DOMAIN_DMA.
  91 */
  92int iommu_get_dma_cookie(struct iommu_domain *domain)
  93{
  94        if (domain->iova_cookie)
  95                return -EEXIST;
  96
  97        domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
  98        if (!domain->iova_cookie)
  99                return -ENOMEM;
 100
 101        return 0;
 102}
 103EXPORT_SYMBOL(iommu_get_dma_cookie);
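
/*
 * Illustrative sketch, not part of the original file: how an IOMMU driver's
 * domain_alloc() callback would typically acquire the cookie. The
 * example_dma_domain wrapper and the function name are hypothetical.
 */
struct example_dma_domain {
        struct iommu_domain     domain;
        /* driver-private page table state would live here */
};

static struct iommu_domain *example_domain_alloc(unsigned type)
{
        struct example_dma_domain *ed;

        if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        ed = kzalloc(sizeof(*ed), GFP_KERNEL);
        if (!ed)
                return NULL;

        /* DMA API domains need the IOVA allocator cookie from the start */
        if (type == IOMMU_DOMAIN_DMA && iommu_get_dma_cookie(&ed->domain)) {
                kfree(ed);
                return NULL;
        }

        return &ed->domain;
}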
 104
 105/**
 106 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 107 * @domain: IOMMU domain to prepare
 108 * @base: Start address of IOVA region for MSI mappings
 109 *
 110 * Users who manage their own IOVA allocation and do not want DMA API support,
 111 * but would still like to take advantage of automatic MSI remapping, can use
 112 * this to initialise their own domain appropriately. Users should reserve a
 113 * contiguous IOVA region, starting at @base, large enough to accommodate the
 114 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 115 * used by the devices attached to @domain.
 116 */
 117int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
 118{
 119        struct iommu_dma_cookie *cookie;
 120
 121        if (domain->type != IOMMU_DOMAIN_UNMANAGED)
 122                return -EINVAL;
 123
 124        if (domain->iova_cookie)
 125                return -EEXIST;
 126
 127        cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
 128        if (!cookie)
 129                return -ENOMEM;
 130
 131        cookie->msi_iova = base;
 132        domain->iova_cookie = cookie;
 133        return 0;
 134}
 135EXPORT_SYMBOL(iommu_get_msi_cookie);
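
/*
 * Illustrative sketch, not part of the original file: a user that manages its
 * own IOVA space (VFIO-style) can pick the software MSI window advertised by
 * the IOMMU driver and hand it to iommu_get_msi_cookie(). The function name
 * is hypothetical.
 */
static int example_enable_msi_remap(struct device *dev,
                                    struct iommu_domain *domain)
{
        struct iommu_resv_region *region;
        dma_addr_t base = 0;
        bool found = false;
        LIST_HEAD(resv_regions);
        int ret = -ENODEV;

        iommu_get_resv_regions(dev, &resv_regions);
        list_for_each_entry(region, &resv_regions, list) {
                if (region->type == IOMMU_RESV_SW_MSI) {
                        base = region->start;
                        found = true;
                        break;
                }
        }
        iommu_put_resv_regions(dev, &resv_regions);

        /* The caller promises never to use this window for its own DMA */
        if (found)
                ret = iommu_get_msi_cookie(domain, base);
        return ret;
}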
 136
 137/**
 138 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 139 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 140 *          iommu_get_msi_cookie()
 141 *
 142 * IOMMU drivers should normally call this from their domain_free callback.
 143 */
 144void iommu_put_dma_cookie(struct iommu_domain *domain)
 145{
 146        struct iommu_dma_cookie *cookie = domain->iova_cookie;
 147        struct iommu_dma_msi_page *msi, *tmp;
 148
 149        if (!cookie)
 150                return;
 151
 152        if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
 153                put_iova_domain(&cookie->iovad);
 154
 155        list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
 156                list_del(&msi->list);
 157                kfree(msi);
 158        }
 159        kfree(cookie);
 160        domain->iova_cookie = NULL;
 161}
 162EXPORT_SYMBOL(iommu_put_dma_cookie);
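
/*
 * Illustrative sketch, not part of the original file: the matching
 * domain_free() callback for the hypothetical driver above. Releasing the
 * cookie is safe even if one was never attached.
 */
static void example_domain_free(struct iommu_domain *domain)
{
        struct example_dma_domain *ed =
                container_of(domain, struct example_dma_domain, domain);

        iommu_put_dma_cookie(domain);
        kfree(ed);
}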
 163
 164/**
 165 * iommu_dma_get_resv_regions - Reserved region driver helper
 166 * @dev: Device from iommu_get_resv_regions()
 167 * @list: Reserved region list from iommu_get_resv_regions()
 168 *
 169 * IOMMU drivers can use this to implement their .get_resv_regions callback
 170 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
  171 * ITS region reservation on ACPI-based ARM platforms that may require HW MSI
 172 * reservation.
 173 */
 174void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
 175{
  177        if (!is_of_node(dev->iommu_fwspec->iommu_fwnode))
  178                iort_iommu_msi_get_resv_regions(dev, list);
 180}
 181EXPORT_SYMBOL(iommu_dma_get_resv_regions);
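
/*
 * Illustrative sketch, not part of the original file: an IOMMU driver's
 * .get_resv_regions callback defers the generic MSI reservations to the
 * helper above and then appends its own hardware-specific regions. The
 * direct-mapped window below is purely hypothetical.
 */
static void example_get_resv_regions(struct device *dev,
                                     struct list_head *head)
{
        struct iommu_resv_region *region;

        iommu_dma_get_resv_regions(dev, head);

        /* e.g. a 1MB range at 1GB which firmware requires to stay 1:1 mapped */
        region = iommu_alloc_resv_region(0x40000000, 0x100000,
                                         IOMMU_READ | IOMMU_WRITE,
                                         IOMMU_RESV_DIRECT);
        if (region)
                list_add_tail(&region->list, head);
}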
 182
 183static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
 184                phys_addr_t start, phys_addr_t end)
 185{
 186        struct iova_domain *iovad = &cookie->iovad;
 187        struct iommu_dma_msi_page *msi_page;
 188        int i, num_pages;
 189
 190        start -= iova_offset(iovad, start);
 191        num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);
 192
 193        msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
 194        if (!msi_page)
 195                return -ENOMEM;
 196
 197        for (i = 0; i < num_pages; i++) {
 198                msi_page[i].phys = start;
 199                msi_page[i].iova = start;
 200                INIT_LIST_HEAD(&msi_page[i].list);
 201                list_add(&msi_page[i].list, &cookie->msi_page_list);
 202                start += iovad->granule;
 203        }
 204
 205        return 0;
 206}
 207
 208static void iova_reserve_pci_windows(struct pci_dev *dev,
 209                struct iova_domain *iovad)
 210{
 211        struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
 212        struct resource_entry *window;
 213        unsigned long lo, hi;
 214
 215        resource_list_for_each_entry(window, &bridge->windows) {
 216                if (resource_type(window->res) != IORESOURCE_MEM)
 217                        continue;
 218
 219                lo = iova_pfn(iovad, window->res->start - window->offset);
 220                hi = iova_pfn(iovad, window->res->end - window->offset);
 221                reserve_iova(iovad, lo, hi);
 222        }
 223}
 224
 225static int iova_reserve_iommu_regions(struct device *dev,
 226                struct iommu_domain *domain)
 227{
 228        struct iommu_dma_cookie *cookie = domain->iova_cookie;
 229        struct iova_domain *iovad = &cookie->iovad;
 230        struct iommu_resv_region *region;
 231        LIST_HEAD(resv_regions);
 232        int ret = 0;
 233
 234        if (dev_is_pci(dev))
 235                iova_reserve_pci_windows(to_pci_dev(dev), iovad);
 236
 237        iommu_get_resv_regions(dev, &resv_regions);
 238        list_for_each_entry(region, &resv_regions, list) {
 239                unsigned long lo, hi;
 240
 241                /* We ARE the software that manages these! */
 242                if (region->type == IOMMU_RESV_SW_MSI)
 243                        continue;
 244
 245                lo = iova_pfn(iovad, region->start);
 246                hi = iova_pfn(iovad, region->start + region->length - 1);
 247                reserve_iova(iovad, lo, hi);
 248
 249                if (region->type == IOMMU_RESV_MSI)
 250                        ret = cookie_init_hw_msi_region(cookie, region->start,
 251                                        region->start + region->length);
 252                if (ret)
 253                        break;
 254        }
 255        iommu_put_resv_regions(dev, &resv_regions);
 256
 257        return ret;
 258}
 259
 260/**
 261 * iommu_dma_init_domain - Initialise a DMA mapping domain
 262 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 263 * @base: IOVA at which the mappable address space starts
 264 * @size: Size of IOVA space
 265 * @dev: Device the domain is being initialised for
 266 *
 267 * @base and @size should be exact multiples of IOMMU page granularity to
 268 * avoid rounding surprises. If necessary, we reserve the page at address 0
 269 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 270 * any change which could make prior IOVAs invalid will fail.
 271 */
 272int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 273                u64 size, struct device *dev)
 274{
 275        struct iommu_dma_cookie *cookie = domain->iova_cookie;
 276        struct iova_domain *iovad = &cookie->iovad;
  277        unsigned long order, base_pfn;
 278
 279        if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
 280                return -EINVAL;
 281
 282        /* Use the smallest supported page size for IOVA granularity */
 283        order = __ffs(domain->pgsize_bitmap);
 284        base_pfn = max_t(unsigned long, 1, base >> order);
 286
 287        /* Check the domain allows at least some access to the device... */
 288        if (domain->geometry.force_aperture) {
 289                if (base > domain->geometry.aperture_end ||
 290                    base + size <= domain->geometry.aperture_start) {
 291                        pr_warn("specified DMA range outside IOMMU capability\n");
 292                        return -EFAULT;
 293                }
 294                /* ...then finally give it a kicking to make sure it fits */
 295                base_pfn = max_t(unsigned long, base_pfn,
 296                                domain->geometry.aperture_start >> order);
 297        }
 298
 299        /* start_pfn is always nonzero for an already-initialised domain */
 300        if (iovad->start_pfn) {
 301                if (1UL << order != iovad->granule ||
 302                    base_pfn != iovad->start_pfn) {
 303                        pr_warn("Incompatible range for DMA domain\n");
 304                        return -EFAULT;
 305                }
 306
 307                return 0;
 308        }
 309
 310        init_iova_domain(iovad, 1UL << order, base_pfn);
 311        if (!dev)
 312                return 0;
 313
 314        return iova_reserve_iommu_regions(dev, domain);
 315}
 316EXPORT_SYMBOL(iommu_dma_init_domain);
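
/*
 * Illustrative sketch, not part of the original file: arch code setting a
 * device up for IOMMU-backed DMA would initialise the default domain with the
 * device's DMA window before installing its IOMMU-aware dma_map_ops. The
 * function name and parameters are hypothetical.
 */
static bool example_setup_dma_domain(struct device *dev, u64 dma_base, u64 size)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

        /* Only default DMA domains carry the IOVA cookie */
        if (!domain || domain->type != IOMMU_DOMAIN_DMA)
                return false;

        if (iommu_dma_init_domain(domain, dma_base, size, dev))
                return false;

        /* ...the arch would now point dev->dma_ops at its IOMMU ops... */
        return true;
}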
 317
 318/**
 319 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 320 *                    page flags.
 321 * @dir: Direction of DMA transfer
 322 * @coherent: Is the DMA master cache-coherent?
 323 * @attrs: DMA attributes for the mapping
 324 *
 325 * Return: corresponding IOMMU API page protection flags
 326 */
 327int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
 328                     unsigned long attrs)
 329{
 330        int prot = coherent ? IOMMU_CACHE : 0;
 331
 332        if (attrs & DMA_ATTR_PRIVILEGED)
 333                prot |= IOMMU_PRIV;
 334
 335        switch (dir) {
 336        case DMA_BIDIRECTIONAL:
 337                return prot | IOMMU_READ | IOMMU_WRITE;
 338        case DMA_TO_DEVICE:
 339                return prot | IOMMU_READ;
 340        case DMA_FROM_DEVICE:
 341                return prot | IOMMU_WRITE;
 342        default:
 343                return 0;
 344        }
 345}
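
/*
 * Illustrative sketch, not part of the original file: an arch dma_map_ops
 * .map_page implementation would combine dma_info_to_prot() with the page
 * mapping helper below. The "coherent" parameter stands in for the arch's
 * knowledge of whether the device is cache-coherent.
 */
static dma_addr_t example_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   unsigned long attrs, bool coherent)
{
        int prot = dma_info_to_prot(dir, coherent, attrs);

        /* A non-coherent device would also need a cache clean here */
        return iommu_dma_map_page(dev, page, offset, size, prot);
}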
 346
 347static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
 348                size_t size, dma_addr_t dma_limit, struct device *dev)
 349{
 350        struct iommu_dma_cookie *cookie = domain->iova_cookie;
 351        struct iova_domain *iovad = &cookie->iovad;
 352        unsigned long shift, iova_len, iova = 0;
 353
 354        if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
 355                cookie->msi_iova += size;
 356                return cookie->msi_iova - size;
 357        }
 358
 359        shift = iova_shift(iovad);
 360        iova_len = size >> shift;
 361        /*
 362         * Freeing non-power-of-two-sized allocations back into the IOVA caches
 363         * will come back to bite us badly, so we have to waste a bit of space
 364         * rounding up anything cacheable to make sure that can't happen. The
 365         * order of the unadjusted size will still match upon freeing.
 366         */
 367        if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
 368                iova_len = roundup_pow_of_two(iova_len);
 369
 370        if (dev->bus_dma_mask)
 371                dma_limit &= dev->bus_dma_mask;
 372
 373        if (domain->geometry.force_aperture)
 374                dma_limit = min(dma_limit, domain->geometry.aperture_end);
 375
 376        /* Try to get PCI devices a SAC address */
 377        if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
 378                iova = alloc_iova_fast(iovad, iova_len,
 379                                       DMA_BIT_MASK(32) >> shift, false);
 380
 381        if (!iova)
 382                iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
 383                                       true);
 384
 385        return (dma_addr_t)iova << shift;
 386}
 387
 388static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
 389                dma_addr_t iova, size_t size)
 390{
 391        struct iova_domain *iovad = &cookie->iovad;
 392
 393        /* The MSI case is only ever cleaning up its most recent allocation */
 394        if (cookie->type == IOMMU_DMA_MSI_COOKIE)
 395                cookie->msi_iova -= size;
 396        else
 397                free_iova_fast(iovad, iova_pfn(iovad, iova),
 398                                size >> iova_shift(iovad));
 399}
 400
 401static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
 402                size_t size)
 403{
 404        struct iommu_dma_cookie *cookie = domain->iova_cookie;
 405        struct iova_domain *iovad = &cookie->iovad;
 406        size_t iova_off = iova_offset(iovad, dma_addr);
 407
 408        dma_addr -= iova_off;
 409        size = iova_align(iovad, size + iova_off);
 410
 411        WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
 412        iommu_dma_free_iova(cookie, dma_addr, size);
 413}
 414
 415static void __iommu_dma_free_pages(struct page **pages, int count)
 416{
 417        while (count--)
 418                __free_page(pages[count]);
 419        kvfree(pages);
 420}
 421
 422static struct page **__iommu_dma_alloc_pages(unsigned int count,
 423                unsigned long order_mask, gfp_t gfp)
 424{
 425        struct page **pages;
 426        unsigned int i = 0, array_size = count * sizeof(*pages);
 427
 428        order_mask &= (2U << MAX_ORDER) - 1;
 429        if (!order_mask)
 430                return NULL;
 431
 432        if (array_size <= PAGE_SIZE)
 433                pages = kzalloc(array_size, GFP_KERNEL);
 434        else
 435                pages = vzalloc(array_size);
 436        if (!pages)
 437                return NULL;
 438
  439        /* IOMMU can map any pages, so highmem can also be used here */
 440        gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
 441
 442        while (count) {
 443                struct page *page = NULL;
 444                unsigned int order_size;
 445
 446                /*
 447                 * Higher-order allocations are a convenience rather
 448                 * than a necessity, hence using __GFP_NORETRY until
 449                 * falling back to minimum-order allocations.
 450                 */
 451                for (order_mask &= (2U << __fls(count)) - 1;
 452                     order_mask; order_mask &= ~order_size) {
 453                        unsigned int order = __fls(order_mask);
 454
 455                        order_size = 1U << order;
 456                        page = alloc_pages((order_mask - order_size) ?
 457                                           gfp | __GFP_NORETRY : gfp, order);
 458                        if (!page)
 459                                continue;
 460                        if (!order)
 461                                break;
 462                        if (!PageCompound(page)) {
 463                                split_page(page, order);
 464                                break;
 465                        } else if (!split_huge_page(page)) {
 466                                break;
 467                        }
 468                        __free_pages(page, order);
 469                }
 470                if (!page) {
 471                        __iommu_dma_free_pages(pages, i);
 472                        return NULL;
 473                }
 474                count -= order_size;
 475                while (order_size--)
 476                        pages[i++] = page++;
 477        }
 478        return pages;
 479}
 480
 481/**
 482 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 483 * @dev: Device which owns this buffer
 484 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 485 * @size: Size of buffer in bytes
 486 * @handle: DMA address of buffer
 487 *
 488 * Frees both the pages associated with the buffer, and the array
 489 * describing them
 490 */
 491void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
 492                dma_addr_t *handle)
 493{
 494        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
 495        __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
 496        *handle = IOMMU_MAPPING_ERROR;
 497}
 498
 499/**
 500 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 501 * @dev: Device to allocate memory for. Must be a real device
 502 *       attached to an iommu_dma_domain
 503 * @size: Size of buffer in bytes
 504 * @gfp: Allocation flags
 505 * @attrs: DMA attributes for this allocation
 506 * @prot: IOMMU mapping flags
 507 * @handle: Out argument for allocated DMA handle
 508 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 509 *              given VA/PA are visible to the given non-coherent device.
 510 *
 511 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 512 * but an IOMMU which supports smaller pages might not map the whole thing.
 513 *
 514 * Return: Array of struct page pointers describing the buffer,
 515 *         or NULL on failure.
 516 */
 517struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
 518                unsigned long attrs, int prot, dma_addr_t *handle,
 519                void (*flush_page)(struct device *, const void *, phys_addr_t))
 520{
 521        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 522        struct iommu_dma_cookie *cookie = domain->iova_cookie;
 523        struct iova_domain *iovad = &cookie->iovad;
 524        struct page **pages;
 525        struct sg_table sgt;
 526        dma_addr_t iova;
 527        unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
 528
 529        *handle = IOMMU_MAPPING_ERROR;
 530
 531        min_size = alloc_sizes & -alloc_sizes;
 532        if (min_size < PAGE_SIZE) {
 533                min_size = PAGE_SIZE;
 534                alloc_sizes |= PAGE_SIZE;
 535        } else {
 536                size = ALIGN(size, min_size);
 537        }
 538        if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
 539                alloc_sizes = min_size;
 540
 541        count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 542        pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
 543        if (!pages)
 544                return NULL;
 545
 546        size = iova_align(iovad, size);
 547        iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
 548        if (!iova)
 549                goto out_free_pages;
 550
 551        if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
 552                goto out_free_iova;
 553
 554        if (!(prot & IOMMU_CACHE)) {
 555                struct sg_mapping_iter miter;
 556                /*
 557                 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
 558                 * sufficient here, so skip it by using the "wrong" direction.
 559                 */
 560                sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
 561                while (sg_miter_next(&miter))
 562                        flush_page(dev, miter.addr, page_to_phys(miter.page));
 563                sg_miter_stop(&miter);
 564        }
 565
 566        if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
 567                        < size)
 568                goto out_free_sg;
 569
 570        *handle = iova;
 571        sg_free_table(&sgt);
 572        return pages;
 573
 574out_free_sg:
 575        sg_free_table(&sgt);
 576out_free_iova:
 577        iommu_dma_free_iova(cookie, iova, size);
 578out_free_pages:
 579        __iommu_dma_free_pages(pages, count);
 580        return NULL;
 581}
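
/*
 * Illustrative sketch, not part of the original file: a non-coherent arch
 * would wrap iommu_dma_alloc() roughly like this, supplying a flush_page
 * callback to make each freshly-zeroed page visible to the device, then
 * giving the CPU a contiguous view of the page array. example_flush_page()
 * stands in for the arch's real cache maintenance routine.
 */
static void example_flush_page(struct device *dev, const void *virt,
                               phys_addr_t phys)
{
        /* arch-specific: clean PAGE_SIZE bytes at virt/phys to the PoC */
}

static void *example_alloc_remap(struct device *dev, size_t size, gfp_t gfp,
                                 unsigned long attrs, int prot,
                                 dma_addr_t *handle)
{
        struct page **pages;
        void *addr;

        pages = iommu_dma_alloc(dev, size, gfp, attrs, prot, handle,
                                example_flush_page);
        if (!pages)
                return NULL;

        addr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT, VM_MAP,
                    pgprot_writecombine(PAGE_KERNEL));
        if (!addr)
                iommu_dma_free(dev, pages, size, handle);
        return addr;
}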
 582
 583/**
 584 * iommu_dma_mmap - Map a buffer into provided user VMA
 585 * @pages: Array representing buffer from iommu_dma_alloc()
 586 * @size: Size of buffer in bytes
 587 * @vma: VMA describing requested userspace mapping
 588 *
 589 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 590 * for verifying the correct size and protection of @vma beforehand.
 591 */
 593int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
 594{
 595        unsigned long uaddr = vma->vm_start;
 596        unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 597        int ret = -ENXIO;
 598
 599        for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
 600                ret = vm_insert_page(vma, uaddr, pages[i]);
 601                if (ret)
 602                        break;
 603                uaddr += PAGE_SIZE;
 604        }
 605        return ret;
 606}
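
/*
 * Illustrative sketch, not part of the original file: an arch .mmap
 * implementation recovers the page array behind a remapped allocation (as
 * created by the sketch after iommu_dma_alloc() above) and hands it to
 * iommu_dma_mmap(), after fixing up the vma protections itself.
 */
static int example_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                              void *cpu_addr, size_t size)
{
        struct vm_struct *area = find_vm_area(cpu_addr);

        if (!area || !area->pages)
                return -ENXIO;

        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        return iommu_dma_mmap(area->pages, size, vma);
}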
 607
 608static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 609                size_t size, int prot)
 610{
 611        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 612        struct iommu_dma_cookie *cookie = domain->iova_cookie;
 613        size_t iova_off = 0;
 614        dma_addr_t iova;
 615
 616        if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
 617                iova_off = iova_offset(&cookie->iovad, phys);
 618                size = iova_align(&cookie->iovad, size + iova_off);
 619        }
 620
 621        iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
 622        if (!iova)
 623                return IOMMU_MAPPING_ERROR;
 624
 625        if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
 626                iommu_dma_free_iova(cookie, iova, size);
 627                return IOMMU_MAPPING_ERROR;
 628        }
 629        return iova + iova_off;
 630}
 631
 632dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 633                unsigned long offset, size_t size, int prot)
 634{
 635        return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
 636}
 637
 638void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
 639                enum dma_data_direction dir, unsigned long attrs)
 640{
 641        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
 642}
 643
 644/*
 645 * Prepare a successfully-mapped scatterlist to give back to the caller.
 646 *
 647 * At this point the segments are already laid out by iommu_dma_map_sg() to
 648 * avoid individually crossing any boundaries, so we merely need to check a
 649 * segment's start address to avoid concatenating across one.
 650 */
 651static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
 652                dma_addr_t dma_addr)
 653{
 654        struct scatterlist *s, *cur = sg;
 655        unsigned long seg_mask = dma_get_seg_boundary(dev);
 656        unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
 657        int i, count = 0;
 658
 659        for_each_sg(sg, s, nents, i) {
 660                /* Restore this segment's original unaligned fields first */
 661                unsigned int s_iova_off = sg_dma_address(s);
 662                unsigned int s_length = sg_dma_len(s);
 663                unsigned int s_iova_len = s->length;
 664
 665                s->offset += s_iova_off;
 666                s->length = s_length;
 667                sg_dma_address(s) = IOMMU_MAPPING_ERROR;
 668                sg_dma_len(s) = 0;
 669
 670                /*
 671                 * Now fill in the real DMA data. If...
 672                 * - there is a valid output segment to append to
 673                 * - and this segment starts on an IOVA page boundary
 674                 * - but doesn't fall at a segment boundary
 675                 * - and wouldn't make the resulting output segment too long
 676                 */
 677                if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
 678                    (cur_len + s_length <= max_len)) {
 679                        /* ...then concatenate it with the previous one */
 680                        cur_len += s_length;
 681                } else {
 682                        /* Otherwise start the next output segment */
 683                        if (i > 0)
 684                                cur = sg_next(cur);
 685                        cur_len = s_length;
 686                        count++;
 687
 688                        sg_dma_address(cur) = dma_addr + s_iova_off;
 689                }
 690
 691                sg_dma_len(cur) = cur_len;
 692                dma_addr += s_iova_len;
 693
 694                if (s_length + s_iova_off < s_iova_len)
 695                        cur_len = 0;
 696        }
 697        return count;
 698}
 699
 700/*
 701 * If mapping failed, then just restore the original list,
 702 * but making sure the DMA fields are invalidated.
 703 */
 704static void __invalidate_sg(struct scatterlist *sg, int nents)
 705{
 706        struct scatterlist *s;
 707        int i;
 708
 709        for_each_sg(sg, s, nents, i) {
 710                if (sg_dma_address(s) != IOMMU_MAPPING_ERROR)
 711                        s->offset += sg_dma_address(s);
 712                if (sg_dma_len(s))
 713                        s->length = sg_dma_len(s);
 714                sg_dma_address(s) = IOMMU_MAPPING_ERROR;
 715                sg_dma_len(s) = 0;
 716        }
 717}
 718
 719/*
 720 * The DMA API client is passing in a scatterlist which could describe
 721 * any old buffer layout, but the IOMMU API requires everything to be
 722 * aligned to IOMMU pages. Hence the need for this complicated bit of
 723 * impedance-matching, to be able to hand off a suitably-aligned list,
 724 * but still preserve the original offsets and sizes for the caller.
 725 */
 726int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 727                int nents, int prot)
 728{
 729        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 730        struct iommu_dma_cookie *cookie = domain->iova_cookie;
 731        struct iova_domain *iovad = &cookie->iovad;
 732        struct scatterlist *s, *prev = NULL;
 733        dma_addr_t iova;
 734        size_t iova_len = 0;
 735        unsigned long mask = dma_get_seg_boundary(dev);
 736        int i;
 737
 738        /*
 739         * Work out how much IOVA space we need, and align the segments to
 740         * IOVA granules for the IOMMU driver to handle. With some clever
 741         * trickery we can modify the list in-place, but reversibly, by
 742         * stashing the unaligned parts in the as-yet-unused DMA fields.
 743         */
 744        for_each_sg(sg, s, nents, i) {
 745                size_t s_iova_off = iova_offset(iovad, s->offset);
 746                size_t s_length = s->length;
 747                size_t pad_len = (mask - iova_len + 1) & mask;
 748
 749                sg_dma_address(s) = s_iova_off;
 750                sg_dma_len(s) = s_length;
 751                s->offset -= s_iova_off;
 752                s_length = iova_align(iovad, s_length + s_iova_off);
 753                s->length = s_length;
 754
 755                /*
 756                 * Due to the alignment of our single IOVA allocation, we can
 757                 * depend on these assumptions about the segment boundary mask:
 758                 * - If mask size >= IOVA size, then the IOVA range cannot
 759                 *   possibly fall across a boundary, so we don't care.
 760                 * - If mask size < IOVA size, then the IOVA range must start
 761                 *   exactly on a boundary, therefore we can lay things out
 762                 *   based purely on segment lengths without needing to know
 763                 *   the actual addresses beforehand.
 764                 * - The mask must be a power of 2, so pad_len == 0 if
 765                 *   iova_len == 0, thus we cannot dereference prev the first
 766                 *   time through here (i.e. before it has a meaningful value).
 767                 */
 768                if (pad_len && pad_len < s_length - 1) {
 769                        prev->length += pad_len;
 770                        iova_len += pad_len;
 771                }
 772
 773                iova_len += s_length;
 774                prev = s;
 775        }
 776
 777        iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
 778        if (!iova)
 779                goto out_restore_sg;
 780
 781        /*
 782         * We'll leave any physical concatenation to the IOMMU driver's
 783         * implementation - it knows better than we do.
 784         */
 785        if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
 786                goto out_free_iova;
 787
 788        return __finalise_sg(dev, sg, nents, iova);
 789
 790out_free_iova:
 791        iommu_dma_free_iova(cookie, iova, iova_len);
 792out_restore_sg:
 793        __invalidate_sg(sg, nents);
 794        return 0;
 795}
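
/*
 * Illustrative sketch, not part of the original file: the scatterlist helpers
 * slot straight into an arch dma_map_ops pair; as before, "coherent" stands
 * in for the arch's knowledge of the device, and the unmap side simply calls
 * iommu_dma_unmap_sg() below.
 */
static int example_map_sg(struct device *dev, struct scatterlist *sgl,
                          int nents, enum dma_data_direction dir,
                          unsigned long attrs, bool coherent)
{
        /* A non-coherent device would also need per-segment cache cleaning */
        return iommu_dma_map_sg(dev, sgl, nents,
                                dma_info_to_prot(dir, coherent, attrs));
}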
 796
 797void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 798                enum dma_data_direction dir, unsigned long attrs)
 799{
 800        dma_addr_t start, end;
 801        struct scatterlist *tmp;
 802        int i;
 803        /*
 804         * The scatterlist segments are mapped into a single
 805         * contiguous IOVA allocation, so this is incredibly easy.
 806         */
 807        start = sg_dma_address(sg);
 808        for_each_sg(sg_next(sg), tmp, nents - 1, i) {
 809                if (sg_dma_len(tmp) == 0)
 810                        break;
 811                sg = tmp;
 812        }
 813        end = sg_dma_address(sg) + sg_dma_len(sg);
 814        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start);
 815}
 816
 817dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
 818                size_t size, enum dma_data_direction dir, unsigned long attrs)
 819{
 820        return __iommu_dma_map(dev, phys, size,
 821                        dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
 822}
 823
 824void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
 825                size_t size, enum dma_data_direction dir, unsigned long attrs)
 826{
 827        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
 828}
 829
 830int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 831{
 832        return dma_addr == IOMMU_MAPPING_ERROR;
 833}
 834
 835static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 836                phys_addr_t msi_addr, struct iommu_domain *domain)
 837{
 838        struct iommu_dma_cookie *cookie = domain->iova_cookie;
 839        struct iommu_dma_msi_page *msi_page;
 840        dma_addr_t iova;
 841        int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
 842        size_t size = cookie_msi_granule(cookie);
 843
 844        msi_addr &= ~(phys_addr_t)(size - 1);
 845        list_for_each_entry(msi_page, &cookie->msi_page_list, list)
 846                if (msi_page->phys == msi_addr)
 847                        return msi_page;
 848
 849        msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
 850        if (!msi_page)
 851                return NULL;
 852
 853        iova = __iommu_dma_map(dev, msi_addr, size, prot);
 854        if (iommu_dma_mapping_error(dev, iova))
 855                goto out_free_page;
 856
 857        INIT_LIST_HEAD(&msi_page->list);
 858        msi_page->phys = msi_addr;
 859        msi_page->iova = iova;
 860        list_add(&msi_page->list, &cookie->msi_page_list);
 861        return msi_page;
 862
 863out_free_page:
 864        kfree(msi_page);
 865        return NULL;
 866}
 867
 868void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
 869{
 870        struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
 871        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 872        struct iommu_dma_cookie *cookie;
 873        struct iommu_dma_msi_page *msi_page;
 874        phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
 875        unsigned long flags;
 876
 877        if (!domain || !domain->iova_cookie)
 878                return;
 879
 880        cookie = domain->iova_cookie;
 881
 882        /*
 883         * We disable IRQs to rule out a possible inversion against
 884         * irq_desc_lock if, say, someone tries to retarget the affinity
 885         * of an MSI from within an IPI handler.
 886         */
 887        spin_lock_irqsave(&cookie->msi_lock, flags);
 888        msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
 889        spin_unlock_irqrestore(&cookie->msi_lock, flags);
 890
 891        if (WARN_ON(!msi_page)) {
 892                /*
 893                 * We're called from a void callback, so the best we can do is
 894                 * 'fail' by filling the message with obviously bogus values.
 895                 * Since we got this far due to an IOMMU being present, it's
 896                 * not like the existing address would have worked anyway...
 897                 */
 898                msg->address_hi = ~0U;
 899                msg->address_lo = ~0U;
 900                msg->data = ~0U;
 901        } else {
 902                msg->address_hi = upper_32_bits(msi_page->iova);
 903                msg->address_lo &= cookie_msi_granule(cookie) - 1;
 904                msg->address_lo += lower_32_bits(msi_page->iova);
 905        }
 906}
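
/*
 * Illustrative sketch, not part of the original file: an MSI irqchip calls
 * this hook at the end of its .irq_compose_msi_msg callback, so that devices
 * behind an IOMMU program the remapped IOVA of the doorbell rather than its
 * physical address. The doorbell value below is hypothetical.
 */
static void example_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
        phys_addr_t doorbell = 0x08020040;

        msg->address_hi = upper_32_bits(doorbell);
        msg->address_lo = lower_32_bits(doorbell);
        msg->data = data->hwirq;

        /* Rewrites address_hi/lo in place if an IOMMU mapping is in use */
        iommu_dma_map_msi_msg(data->irq, msg);
}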
 907