linux/drivers/xen/swiotlb-xen.c
   1/*
   2 *  Copyright 2010
   3 *  by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
   4 *
   5 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License v2.0 as published by
   9 * the Free Software Foundation
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * PV guests under Xen run in a non-contiguous memory architecture.
  17 *
  18 * When PCI pass-through is utilized, this necessitates an IOMMU for
  19 * translating bus (DMA) addresses to virtual addresses and vice-versa,
  20 * and also for providing a mechanism to obtain contiguous pages for
  21 * device driver operations (say, DMA operations).
  22 *
  23 * Specifically, under Xen the Linux idea of pages is an illusion. It
  24 * assumes that pages start at zero and go up to the available memory. To
  25 * help with that, the Linux Xen MMU provides a lookup mechanism to
  26 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
  27 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore,
  28 * memory is not contiguous: the Xen hypervisor stitches memory for guests
  29 * from different pools, which means there is no guarantee that PFN==MFN
  30 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
  31 * allocated in descending order (high to low), meaning the guest might
  32 * never get any MFNs under the 4GB mark.
  33 *
  34 */
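/*
 * Illustrative sketch (not part of the original file): on a PV guest the
 * PFN==MFN identity that bare-metal code relies on does not hold, so for
 * some hypothetical kernel buffer "buf" the test
 *
 *	unsigned long pfn = PFN_DOWN(virt_to_phys(buf));
 *
 *	pfn_to_bfn(pfn) == pfn
 *
 * may well be false, and even when two guest frames are consecutive their
 * machine frames need not be.  The helpers further down translate and,
 * where needed, exchange memory to cope with exactly that.
 */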
  35
  36#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
  37
  38#include <linux/bootmem.h>
  39#include <linux/dma-direct.h>
  40#include <linux/export.h>
  41#include <xen/swiotlb-xen.h>
  42#include <xen/page.h>
  43#include <xen/xen-ops.h>
  44#include <xen/hvc-console.h>
  45
  46#include <asm/dma-mapping.h>
  47#include <asm/xen/page-coherent.h>
  48
  49#include <trace/events/swiotlb.h>
  50/*
  51 * Used to do a quick range check in swiotlb_tbl_unmap_single and
  52 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
  53 * API.
  54 */
  55
  56#ifndef CONFIG_X86
  57static unsigned long dma_alloc_coherent_mask(struct device *dev,
  58                                            gfp_t gfp)
  59{
  60        unsigned long dma_mask = 0;
  61
  62        dma_mask = dev->coherent_dma_mask;
  63        if (!dma_mask)
  64                dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
  65
  66        return dma_mask;
  67}
  68#endif
  69
  70#define XEN_SWIOTLB_ERROR_CODE  (~(dma_addr_t)0x0)
  71
  72static char *xen_io_tlb_start, *xen_io_tlb_end;
  73static unsigned long xen_io_tlb_nslabs;
  74/*
  75 * Quick lookup value of the bus address of the IOTLB.
  76 */
  77
  78static u64 start_dma_addr;
  79
  80/*
  81 * Both of these functions should avoid XEN_PFN_PHYS because phys_addr_t
  82 * can be 32bit when dma_addr_t is 64bit leading to a loss in
  83 * information if the shift is done before casting to 64bit.
  84 */
  85static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
  86{
  87        unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
  88        dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT;
  89
  90        dma |= paddr & ~XEN_PAGE_MASK;
  91
  92        return dma;
  93}
  94
  95static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
  96{
  97        unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
  98        dma_addr_t dma = (dma_addr_t)xen_pfn << XEN_PAGE_SHIFT;
  99        phys_addr_t paddr = dma;
 100
 101        paddr |= baddr & ~XEN_PAGE_MASK;
 102
 103        return paddr;
 104}
 105
 106static inline dma_addr_t xen_virt_to_bus(void *address)
 107{
 108        return xen_phys_to_bus(virt_to_phys(address));
 109}
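/*
 * Worked example of the cast-ordering issue noted above xen_phys_to_bus()
 * (illustrative): with a 32-bit phys_addr_t, a frame number of 0x123456
 * shifted left by XEN_PAGE_SHIFT (12) first would wrap to 0x23456000
 * before being widened to a 64-bit dma_addr_t, whereas casting to
 * dma_addr_t before shifting yields the intended 0x123456000.
 */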
 110
 111static int check_pages_physically_contiguous(unsigned long xen_pfn,
 112                                             unsigned int offset,
 113                                             size_t length)
 114{
 115        unsigned long next_bfn;
 116        int i;
 117        int nr_pages;
 118
 119        next_bfn = pfn_to_bfn(xen_pfn);
 120        nr_pages = (offset + length + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT;
 121
 122        for (i = 1; i < nr_pages; i++) {
 123                if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
 124                        return 0;
 125        }
 126        return 1;
 127}
 128
 129static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
 130{
 131        unsigned long xen_pfn = XEN_PFN_DOWN(p);
 132        unsigned int offset = p & ~XEN_PAGE_MASK;
 133
 134        if (offset + size <= XEN_PAGE_SIZE)
 135                return 0;
 136        if (check_pages_physically_contiguous(xen_pfn, offset, size))
 137                return 0;
 138        return 1;
 139}
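/*
 * Example (illustrative): with 4KiB Xen pages, a buffer starting at
 * offset 0xe00 into a page with size 0x400 spills 0x200 bytes into the
 * next guest page.  That only matters for DMA if the two guest frames are
 * backed by non-consecutive machine frames, which is exactly what
 * check_pages_physically_contiguous() rules out above.
 */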
 140
 141static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
 142{
 143        unsigned long bfn = XEN_PFN_DOWN(dma_addr);
 144        unsigned long xen_pfn = bfn_to_local_pfn(bfn);
 145        phys_addr_t paddr = XEN_PFN_PHYS(xen_pfn);
 146
 147        /* If the address is outside our domain, it CAN
 148         * have the same virtual address as another address
 149         * in our domain. Therefore _only_ check address within our domain.
 150         */
 151        if (pfn_valid(PFN_DOWN(paddr))) {
 152                return paddr >= virt_to_phys(xen_io_tlb_start) &&
 153                       paddr < virt_to_phys(xen_io_tlb_end);
 154        }
 155        return 0;
 156}
 157
 158static int max_dma_bits = 32;
 159
 160static int
 161xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
 162{
 163        int i, rc;
 164        int dma_bits;
 165        dma_addr_t dma_handle;
 166        phys_addr_t p = virt_to_phys(buf);
 167
 168        dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
 169
 170        i = 0;
 171        do {
 172                int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);
 173
 174                do {
 175                        rc = xen_create_contiguous_region(
 176                                p + (i << IO_TLB_SHIFT),
 177                                get_order(slabs << IO_TLB_SHIFT),
 178                                dma_bits, &dma_handle);
 179                } while (rc && dma_bits++ < max_dma_bits);
 180                if (rc)
 181                        return rc;
 182
 183                i += slabs;
 184        } while (i < nslabs);
 185        return 0;
 186}
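/*
 * Worked example of the fixup above (illustrative, assuming 4KiB pages):
 * one IO_TLB_SEGSIZE (128) segment of 2KiB slabs is 256KiB, so dma_bits
 * starts at get_order(256KiB) + PAGE_SHIFT = 6 + 12 = 18.  Each 256KiB
 * chunk is exchanged with the hypervisor for machine-contiguous memory
 * addressable within that many bits, and the width is only raised towards
 * max_dma_bits (32) when a narrower exchange fails.
 */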
 187static unsigned long xen_set_nslabs(unsigned long nr_tbl)
 188{
 189        if (!nr_tbl) {
 190                xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
 191                xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
 192        } else
 193                xen_io_tlb_nslabs = nr_tbl;
 194
 195        return xen_io_tlb_nslabs << IO_TLB_SHIFT;
 196}
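/*
 * Worked example (illustrative): with no "swiotlb=" boot parameter,
 * nr_tbl is 0 and the table defaults to 64MB >> IO_TLB_SHIFT (11) =
 * 32768 slabs of 2KiB, already a multiple of IO_TLB_SEGSIZE (128), so
 * the function returns 64MB worth of bytes.
 */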
 197
 198enum xen_swiotlb_err {
 199        XEN_SWIOTLB_UNKNOWN = 0,
 200        XEN_SWIOTLB_ENOMEM,
 201        XEN_SWIOTLB_EFIXUP
 202};
 203
 204static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
 205{
 206        switch (err) {
 207        case XEN_SWIOTLB_ENOMEM:
 208                return "Cannot allocate Xen-SWIOTLB buffer\n";
 209        case XEN_SWIOTLB_EFIXUP:
 210                return "Failed to get contiguous memory for DMA from Xen!\n"\
 211                    "You either: don't have the permissions, do not have"\
 212                    " enough free memory under 4GB, or the hypervisor memory"\
 213                    " is too fragmented!";
 214        default:
 215                break;
 216        }
 217        return "";
 218}
 219int __ref xen_swiotlb_init(int verbose, bool early)
 220{
 221        unsigned long bytes, order;
 222        int rc = -ENOMEM;
 223        enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
 224        unsigned int repeat = 3;
 225
 226        xen_io_tlb_nslabs = swiotlb_nr_tbl();
 227retry:
 228        bytes = xen_set_nslabs(xen_io_tlb_nslabs);
 229        order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
 230        /*
 231         * Get IO TLB memory from any location.
 232         */
 233        if (early)
 234                xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
 235        else {
 236#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
 237#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
 238                while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
 239                        xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
 240                        if (xen_io_tlb_start)
 241                                break;
 242                        order--;
 243                }
 244                if (order != get_order(bytes)) {
 245                        pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
 246                                (PAGE_SIZE << order) >> 20);
 247                        xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
 248                        bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
 249                }
 250        }
 251        if (!xen_io_tlb_start) {
 252                m_ret = XEN_SWIOTLB_ENOMEM;
 253                goto error;
 254        }
 255        xen_io_tlb_end = xen_io_tlb_start + bytes;
 256        /*
 257         * And replace that memory with pages under 4GB.
 258         */
 259        rc = xen_swiotlb_fixup(xen_io_tlb_start,
 260                               bytes,
 261                               xen_io_tlb_nslabs);
 262        if (rc) {
 263                if (early)
 264                        free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
 265                else {
 266                        free_pages((unsigned long)xen_io_tlb_start, order);
 267                        xen_io_tlb_start = NULL;
 268                }
 269                m_ret = XEN_SWIOTLB_EFIXUP;
 270                goto error;
 271        }
 272        start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
 273        if (early) {
 274                if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
 275                         verbose))
 276                        panic("Cannot allocate SWIOTLB buffer");
 277                rc = 0;
 278        } else
 279                rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
 280
 281        if (!rc)
 282                swiotlb_set_max_segment(PAGE_SIZE);
 283
 284        return rc;
 285error:
 286        if (repeat--) {
 287                xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
 288                                        (xen_io_tlb_nslabs >> 1));
 289                pr_info("Lowering to %luMB\n",
 290                        (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
 291                goto retry;
 292        }
 293        pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
 294        if (early)
 295                panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
 296        else
 297                free_pages((unsigned long)xen_io_tlb_start, order);
 298        return rc;
 299}
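/*
 * Retry arithmetic for the error path above (illustrative): each failed
 * attempt halves the slab count, so a default 64MB request is retried at
 * 32MB, 16MB and 8MB before giving up, and the 1024-slab floor
 * corresponds to 1024 << IO_TLB_SHIFT = 2MB.
 */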
 300
 301static void *
 302xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 303                           dma_addr_t *dma_handle, gfp_t flags,
 304                           unsigned long attrs)
 305{
 306        void *ret;
 307        int order = get_order(size);
 308        u64 dma_mask = DMA_BIT_MASK(32);
 309        phys_addr_t phys;
 310        dma_addr_t dev_addr;
 311
 312        /*
 313        * Ignore region specifiers - the kernel's idea of
 314        * pseudo-phys memory layout has nothing to do with the
 315        * machine physical layout.  We can't allocate highmem
 316        * because we can't return a pointer to it.
 317        */
 318        flags &= ~(__GFP_DMA | __GFP_HIGHMEM);
 319
 320        /* On ARM this function returns an ioremap'ped virtual address for
 321         * which virt_to_phys doesn't return the corresponding physical
 322         * address. In fact on ARM virt_to_phys only works for kernel direct
 323         * mapped RAM. Also see the comment below.
 324         */
 325        ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);
 326
 327        if (!ret)
 328                return ret;
 329
 330        if (hwdev && hwdev->coherent_dma_mask)
 331                dma_mask = dma_alloc_coherent_mask(hwdev, flags);
 332
 333        /* At this point dma_handle is the physical address, next we are
 334         * going to set it to the machine address.
 335         * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
 336         * to *dma_handle. */
 337        phys = *dma_handle;
 338        dev_addr = xen_phys_to_bus(phys);
 339        if (((dev_addr + size - 1 <= dma_mask)) &&
 340            !range_straddles_page_boundary(phys, size))
 341                *dma_handle = dev_addr;
 342        else {
 343                if (xen_create_contiguous_region(phys, order,
 344                                                 fls64(dma_mask), dma_handle) != 0) {
 345                        xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
 346                        return NULL;
 347                }
 348        }
 349        memset(ret, 0, size);
 350        return ret;
 351}
 352
 353static void
 354xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 355                          dma_addr_t dev_addr, unsigned long attrs)
 356{
 357        int order = get_order(size);
 358        phys_addr_t phys;
 359        u64 dma_mask = DMA_BIT_MASK(32);
 360
 361        if (hwdev && hwdev->coherent_dma_mask)
 362                dma_mask = hwdev->coherent_dma_mask;
 363
 364        /* do not use virt_to_phys because on ARM it doesn't return the
 365         * physical address */
 366        phys = xen_bus_to_phys(dev_addr);
 367
 368        if (((dev_addr + size - 1 > dma_mask)) ||
 369            range_straddles_page_boundary(phys, size))
 370                xen_destroy_contiguous_region(phys, order);
 371
 372        xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
 373}
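/*
 * Driver-side view (illustrative sketch, hypothetical names): coherent
 * memory is obtained through the generic DMA API, which dispatches to the
 * .alloc/.free methods above via xen_swiotlb_dma_ops at the end of this
 * file:
 *
 *	void *cpu = dma_alloc_coherent(dev, size, &bus_addr, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, size, cpu, bus_addr);
 *
 * "bus_addr" is a machine (bus) address the device can use, while "cpu"
 * is only meaningful to the guest.
 */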
 374
 375/*
 376 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 377 * physical address to use is returned.
 378 *
 379 * Once the device is given the dma address, the device owns this memory until
 380 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 381 */
 382static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 383                                unsigned long offset, size_t size,
 384                                enum dma_data_direction dir,
 385                                unsigned long attrs)
 386{
 387        phys_addr_t map, phys = page_to_phys(page) + offset;
 388        dma_addr_t dev_addr = xen_phys_to_bus(phys);
 389
 390        BUG_ON(dir == DMA_NONE);
 391        /*
 392         * If the address happens to be in the device's DMA window,
 393         * we can safely return the device addr and not worry about bounce
 394         * buffering it.
 395         */
 396        if (dma_capable(dev, dev_addr, size) &&
 397            !range_straddles_page_boundary(phys, size) &&
 398                !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
 399                (swiotlb_force != SWIOTLB_FORCE)) {
 400                /* we are not interested in the dma_addr returned by
 401                 * xen_dma_map_page, only in the potential cache flushes executed
 402                 * by the function. */
 403                xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
 404                return dev_addr;
 405        }
 406
 407        /*
 408         * Oh well, have to allocate and map a bounce buffer.
 409         */
 410        trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
 411
 412        map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
 413                                     attrs);
 414        if (map == SWIOTLB_MAP_ERROR)
 415                return XEN_SWIOTLB_ERROR_CODE;
 416
 417        dev_addr = xen_phys_to_bus(map);
 418        xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
 419                                        dev_addr, map & ~PAGE_MASK, size, dir, attrs);
 420
 421        /*
 422         * Ensure that the address returned is DMA'ble
 423         */
 424        if (dma_capable(dev, dev_addr, size))
 425                return dev_addr;
 426
 427        attrs |= DMA_ATTR_SKIP_CPU_SYNC;
 428        swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
 429
 430        return XEN_SWIOTLB_ERROR_CODE;
 431}
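/*
 * Driver-visible usage (illustrative sketch, hypothetical device and
 * buffer): drivers never call xen_swiotlb_map_page() directly; they use
 * the generic DMA API, which dispatches here through xen_swiotlb_dma_ops:
 *
 *	dma_addr_t handle = dma_map_page(dev, page, offset, len,
 *					 DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... hand "handle" to the device ...
 *	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
 */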
 432
 433/*
 434 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 435 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 436 * other usages are undefined.
 437 *
 438 * After this call, reads by the cpu from the buffer are guaranteed to see
 439 * whatever the device wrote there.
 440 */
 441static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
 442                             size_t size, enum dma_data_direction dir,
 443                             unsigned long attrs)
 444{
 445        phys_addr_t paddr = xen_bus_to_phys(dev_addr);
 446
 447        BUG_ON(dir == DMA_NONE);
 448
 449        xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);
 450
 451        /* NOTE: We use dev_addr here, not paddr! */
 452        if (is_xen_swiotlb_buffer(dev_addr)) {
 453                swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
 454                return;
 455        }
 456
 457        if (dir != DMA_FROM_DEVICE)
 458                return;
 459
 460        /*
 461         * phys_to_virt doesn't work with highmem pages but we could
 462         * call dma_mark_clean() with a highmem page here. However, we
 463         * are fine since dma_mark_clean() is null on POWERPC. We can
 464         * make dma_mark_clean() take a physical address if necessary.
 465         */
 466        dma_mark_clean(phys_to_virt(paddr), size);
 467}
 468
 469static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 470                            size_t size, enum dma_data_direction dir,
 471                            unsigned long attrs)
 472{
 473        xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
 474}
 475
 476/*
 477 * Make physical memory consistent for a single streaming mode DMA translation
 478 * after a transfer.
 479 *
 480 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 481 * using the cpu, yet do not wish to tear down the dma mapping, you must
 482 * call this function before doing so.  At the next point you give the dma
 483 * address back to the card, you must first perform a
 484 * xen_swiotlb_sync_single_for_device, and then the device again owns the buffer.
 485 */
 486static void
 487xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 488                        size_t size, enum dma_data_direction dir,
 489                        enum dma_sync_target target)
 490{
 491        phys_addr_t paddr = xen_bus_to_phys(dev_addr);
 492
 493        BUG_ON(dir == DMA_NONE);
 494
 495        if (target == SYNC_FOR_CPU)
 496                xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);
 497
 498        /* NOTE: We use dev_addr here, not paddr! */
 499        if (is_xen_swiotlb_buffer(dev_addr))
 500                swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
 501
 502        if (target == SYNC_FOR_DEVICE)
 503                xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);
 504
 505        if (dir != DMA_FROM_DEVICE)
 506                return;
 507
 508        dma_mark_clean(phys_to_virt(paddr), size);
 509}
 510
 511void
 512xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
 513                                size_t size, enum dma_data_direction dir)
 514{
 515        xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
 516}
 517
 518void
 519xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
 520                                   size_t size, enum dma_data_direction dir)
 521{
 522        xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
 523}
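/*
 * Illustrative driver-side pattern for the ownership hand-off described
 * above (hypothetical device/handle names), again going through the
 * generic DMA API rather than calling these functions directly:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU may now inspect the buffer ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	... the device owns the buffer again ...
 */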
 524
 525/*
 526 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 527 * concerning calls here are the same as for xen_swiotlb_unmap_page() above.
 528 */
 529static void
 530xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 531                           int nelems, enum dma_data_direction dir,
 532                           unsigned long attrs)
 533{
 534        struct scatterlist *sg;
 535        int i;
 536
 537        BUG_ON(dir == DMA_NONE);
 538
 539        for_each_sg(sgl, sg, nelems, i)
 540                xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
 541
 542}
 543
 544/*
 545 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 546 * This is the scatter-gather version of the above xen_swiotlb_map_page
 547 * interface.  Here the scatter gather list elements are each tagged with the
 548 * appropriate dma address and length.  They are obtained via
 549 * sg_dma_{address,length}(SG).
 550 *
 551 * NOTE: An implementation may be able to use a smaller number of
 552 *       DMA address/length pairs than there are SG table elements.
 553 *       (for example via virtual mapping capabilities)
 554 *       The routine returns the number of addr/length pairs actually
 555 *       used, at most nents.
 556 *
 557 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 558 * same here.
 559 */
 560static int
 561xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 562                         int nelems, enum dma_data_direction dir,
 563                         unsigned long attrs)
 564{
 565        struct scatterlist *sg;
 566        int i;
 567
 568        BUG_ON(dir == DMA_NONE);
 569
 570        for_each_sg(sgl, sg, nelems, i) {
 571                phys_addr_t paddr = sg_phys(sg);
 572                dma_addr_t dev_addr = xen_phys_to_bus(paddr);
 573
 574                if (swiotlb_force == SWIOTLB_FORCE ||
 575                    xen_arch_need_swiotlb(hwdev, paddr, dev_addr) ||
 576                    !dma_capable(hwdev, dev_addr, sg->length) ||
 577                    range_straddles_page_boundary(paddr, sg->length)) {
 578                        phys_addr_t map = swiotlb_tbl_map_single(hwdev,
 579                                                                 start_dma_addr,
 580                                                                 sg_phys(sg),
 581                                                                 sg->length,
 582                                                                 dir, attrs);
 583                        if (map == SWIOTLB_MAP_ERROR) {
 584                                dev_warn(hwdev, "swiotlb buffer is full\n");
 585                                /* Don't panic here, we expect map_sg users
 586                                   to do proper error handling. */
 587                                attrs |= DMA_ATTR_SKIP_CPU_SYNC;
 588                                xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
 589                                                           attrs);
 590                                sg_dma_len(sgl) = 0;
 591                                return 0;
 592                        }
 593                        dev_addr = xen_phys_to_bus(map);
 594                        xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
 595                                                dev_addr,
 596                                                map & ~PAGE_MASK,
 597                                                sg->length,
 598                                                dir,
 599                                                attrs);
 600                        sg->dma_address = dev_addr;
 601                } else {
 602                        /* we are not interested in the dma_addr returned by
 603                         * xen_dma_map_page, only in the potential cache flushes executed
 604                         * by the function. */
 605                        xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
 606                                                dev_addr,
 607                                                paddr & ~PAGE_MASK,
 608                                                sg->length,
 609                                                dir,
 610                                                attrs);
 611                        sg->dma_address = dev_addr;
 612                }
 613                sg_dma_len(sg) = sg->length;
 614        }
 615        return nelems;
 616}
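/*
 * Illustrative scatter-gather usage from a driver's point of view
 * (hypothetical names); note that the count returned by dma_map_sg(), not
 * the original nents, bounds the mapped entries, while dma_unmap_sg()
 * still takes the original nents:
 *
 *	struct scatterlist *sg;
 *	int i, mapped;
 *
 *	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!mapped)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, mapped, i)
 *		program_hw(sg_dma_address(sg), sg_dma_len(sg));
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */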
 617
 618/*
 619 * Make physical memory consistent for a set of streaming mode DMA translations
 620 * after a transfer.
 621 *
 622 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 623 * and usage.
 624 */
 625static void
 626xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
 627                    int nelems, enum dma_data_direction dir,
 628                    enum dma_sync_target target)
 629{
 630        struct scatterlist *sg;
 631        int i;
 632
 633        for_each_sg(sgl, sg, nelems, i)
 634                xen_swiotlb_sync_single(hwdev, sg->dma_address,
 635                                        sg_dma_len(sg), dir, target);
 636}
 637
 638static void
 639xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
 640                            int nelems, enum dma_data_direction dir)
 641{
 642        xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
 643}
 644
 645static void
 646xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 647                               int nelems, enum dma_data_direction dir)
 648{
 649        xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
 650}
 651
 652/*
 653 * Return whether the given device DMA address mask can be supported
 654 * properly.  For example, if your device can only drive the low 24-bits
 655 * during bus mastering, then you would pass 0x00ffffff as the mask to
 656 * this function.
 657 */
 658static int
 659xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
 660{
 661        return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
 662}
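/*
 * Example (illustrative): a device that can only drive 24 address bits
 * would issue dma_set_mask(dev, DMA_BIT_MASK(24)); that mask is accepted
 * here only if the machine address of the last byte of the bounce buffer
 * lies below 16MB, since otherwise bouncing through it could still hand
 * the device an unreachable address.
 */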
 663
 664/*
 665 * Create userspace mapping for the DMA-coherent memory.
 666 * This function should be called with the pages from the current domain only;
 667 * passing pages mapped from other domains would lead to memory corruption.
 668 */
 669static int
 670xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 671                     void *cpu_addr, dma_addr_t dma_addr, size_t size,
 672                     unsigned long attrs)
 673{
 674#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
 675        if (xen_get_dma_ops(dev)->mmap)
 676                return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr,
 677                                                    dma_addr, size, attrs);
 678#endif
 679        return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
 680}
 681
 682/*
 683 * This function should be called with the pages from the current domain only;
 684 * passing pages mapped from other domains would lead to memory corruption.
 685 */
 686static int
 687xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
 688                        void *cpu_addr, dma_addr_t handle, size_t size,
 689                        unsigned long attrs)
 690{
 691#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
 692        if (xen_get_dma_ops(dev)->get_sgtable) {
 693#if 0
 694        /*
 695         * This check verifies that the page belongs to the current domain and
 696         * is not one mapped from another domain.
 697         * This check is for debug only, and should not go to production build
 698         */
 699                unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle));
 700                BUG_ON (!page_is_ram(bfn));
 701#endif
 702                return xen_get_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr,
 703                                                           handle, size, attrs);
 704        }
 705#endif
 706        return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size);
 707}
 708
 709static int xen_swiotlb_mapping_error(struct device *dev, dma_addr_t dma_addr)
 710{
 711        return dma_addr == XEN_SWIOTLB_ERROR_CODE;
 712}
 713
 714const struct dma_map_ops xen_swiotlb_dma_ops = {
 715        .alloc = xen_swiotlb_alloc_coherent,
 716        .free = xen_swiotlb_free_coherent,
 717        .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
 718        .sync_single_for_device = xen_swiotlb_sync_single_for_device,
 719        .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
 720        .sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
 721        .map_sg = xen_swiotlb_map_sg_attrs,
 722        .unmap_sg = xen_swiotlb_unmap_sg_attrs,
 723        .map_page = xen_swiotlb_map_page,
 724        .unmap_page = xen_swiotlb_unmap_page,
 725        .dma_supported = xen_swiotlb_dma_supported,
 726        .mmap = xen_swiotlb_dma_mmap,
 727        .get_sgtable = xen_swiotlb_get_sgtable,
 728        .mapping_error  = xen_swiotlb_mapping_error,
 729};
 730